diff --git a/.github/workflows/_docker-build.yml b/.github/workflows/_docker-build.yml index 227b0d7482ae..b6a74d1b4677 100644 --- a/.github/workflows/_docker-build.yml +++ b/.github/workflows/_docker-build.yml @@ -101,6 +101,7 @@ jobs: attempt_delay: 9000 # 9 secs with: | pull: true + sbom: true platforms: ${{ matrix.platform.docker }} context: "{{defaultContext}}:${{ inputs.file-dir }}" outputs: type=image,name=${{ inputs.namespace-repository }},push-by-digest=true,name-canonical=true,push=true diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index aba3726017fd..a7c099d8101f 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -51,6 +51,55 @@ jobs: short_sha: ${{ steps.upload.outputs.SHORT_SHA }} dir: ${{ steps.upload.outputs.DIR }} + superexec: + runs-on: ubuntu-22.04 + timeout-minutes: 10 + needs: wheel + strategy: + fail-fast: false + matrix: + python-version: ["3.9", "3.10", "3.11"] + directory: [e2e-bare-auth] + connection: [secure, insecure] + engine: [deployment-engine, simulation-engine] + authentication: [no-auth, client-auth] + exclude: + - connection: insecure + authentication: client-auth + name: | + SuperExec / + Python ${{ matrix.python-version }} / + ${{ matrix.connection }} / + ${{ matrix.authentication }} / + ${{ matrix.engine }} + defaults: + run: + working-directory: e2e/${{ matrix.directory }} + steps: + - uses: actions/checkout@v4 + - name: Bootstrap + uses: ./.github/actions/bootstrap + with: + python-version: ${{ matrix.python-version }} + poetry-skip: 'true' + - name: Download and install Flower wheel from artifact store + if: ${{ github.repository == 'adap/flower' && !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' }} + run: | + # Define base URL for wheel file + WHEEL_URL="https://${{ env.ARTIFACT_BUCKET }}/py/${{ needs.wheel.outputs.dir }}/${{ needs.wheel.outputs.short_sha }}/${{ needs.wheel.outputs.whl_path }}" + if [[ "${{ matrix.engine }}" == "simulation-engine" ]]; 
then + python -m pip install "flwr[simulation] @ ${WHEEL_URL}" + else + python -m pip install "${WHEEL_URL}" + fi + - name: > + Run SuperExec test / + ${{ matrix.connection }} / + ${{ matrix.authentication }} / + ${{ matrix.engine }} + working-directory: e2e/${{ matrix.directory }} + run: ./../test_superexec.sh "${{ matrix.connection }}" "${{ matrix.authentication}}" "${{ matrix.engine }}" + frameworks: runs-on: ubuntu-22.04 timeout-minutes: 10 diff --git a/README.md b/README.md index 9f2604ad37b0..1be37ed391f7 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,7 @@ [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](https://github.com/adap/flower/blob/main/CONTRIBUTING.md) ![Build](https://github.com/adap/flower/actions/workflows/framework.yml/badge.svg) [![Downloads](https://static.pepy.tech/badge/flwr)](https://pepy.tech/project/flwr) +[![Docker Hub](https://img.shields.io/badge/Docker%20Hub-flwr-blue)](https://hub.docker.com/u/flwr) [![Slack](https://img.shields.io/badge/Chat-Slack-red)](https://flower.ai/join-slack) Flower (`flwr`) is a framework for building federated learning systems. The diff --git a/datasets/doc/source/index.rst b/datasets/doc/source/index.rst index 84e25a920f2f..070655550fa1 100644 --- a/datasets/doc/source/index.rst +++ b/datasets/doc/source/index.rst @@ -3,6 +3,15 @@ Flower Datasets Flower Datasets (``flwr-datasets``) is a library that enables the quick and easy creation of datasets for federated learning/analytics/evaluation. It enables heterogeneity (non-iidness) simulation and division of datasets with the preexisting notion of IDs. The library was created by the ``Flower Labs`` team that also created `Flower `_ : A Friendly Federated Learning Framework. +.. 
raw:: html + + + + + Flower Datasets Framework ------------------------- diff --git a/dev/build-docker-image-matrix.py b/dev/build-docker-image-matrix.py index c19949e358b9..52c96e3cca7a 100644 --- a/dev/build-docker-image-matrix.py +++ b/dev/build-docker-image-matrix.py @@ -134,7 +134,7 @@ def tag_latest_ubuntu_with_flwr_version(image: BaseImage) -> List[str]: ubuntu_base_images = generate_base_images( flwr_version, SUPPORTED_PYTHON_VERSIONS, - [Distro(DistroName.UBUNTU, "22.04")], + [Distro(DistroName.UBUNTU, "24.04")], ) # alpine base images for the latest supported python version alpine_base_images = generate_base_images( diff --git a/dev/format.sh b/dev/format.sh index e1e2abc307f1..ada5a7f13abc 100755 --- a/dev/format.sh +++ b/dev/format.sh @@ -36,3 +36,6 @@ python -m nbstripout examples/*/*.ipynb --extra-keys "$KEYS" # Markdown python -m mdformat --number doc/source examples + +# RST +docstrfmt doc/source diff --git a/dev/test.sh b/dev/test.sh index 58ac0b3d24cd..170d9f4acd1e 100755 --- a/dev/test.sh +++ b/dev/test.sh @@ -56,6 +56,14 @@ echo "- mdformat: done" echo "- All Markdown checks passed" +echo "- Start rST checks" + +echo "- docstrfmt: start" +docstrfmt --check doc/source +echo "- docstrfmt: done" + +echo "- All rST checks passed" + echo "- Start license checks" echo "- copyright: start" diff --git a/doc/locales/fr/LC_MESSAGES/framework-docs.po b/doc/locales/fr/LC_MESSAGES/framework-docs.po index 681916e78ed5..e6cd61627bf6 100644 --- a/doc/locales/fr/LC_MESSAGES/framework-docs.po +++ b/doc/locales/fr/LC_MESSAGES/framework-docs.po @@ -3,7 +3,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower Docs\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2024-09-24 00:29+0000\n" +"POT-Creation-Date: 2024-09-27 00:30+0000\n" "PO-Revision-Date: 2023-09-05 17:54+0000\n" "Last-Translator: Charles Beauville \n" "Language: fr\n" @@ -58,23 +58,23 @@ msgid "" "or not by reading the Flower source code." 
msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:22 +#: ../../source/contributor-explanation-public-and-private-apis.rst:23 #, fuzzy msgid "Flower public API" msgstr "Flower ClientApp." -#: ../../source/contributor-explanation-public-and-private-apis.rst:24 +#: ../../source/contributor-explanation-public-and-private-apis.rst:25 msgid "Flower has a well-defined public API. Let's look at this in more detail." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:28 +#: ../../source/contributor-explanation-public-and-private-apis.rst:29 msgid "" "Every component that is reachable by recursively following " "``__init__.__all__`` starting from the root package (``flwr``) is part of" " the public API." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:30 +#: ../../source/contributor-explanation-public-and-private-apis.rst:32 msgid "" "If you want to determine whether a component " "(class/function/generator/...) is part of the public API or not, you need" @@ -82,13 +82,13 @@ msgid "" "src/py/flwr`` to look at the Python sub-packages contained ``flwr``:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:43 +#: ../../source/contributor-explanation-public-and-private-apis.rst:46 msgid "" "Contrast this with the definition of ``__all__`` in the root " "``src/py/flwr/__init__.py``:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:55 +#: ../../source/contributor-explanation-public-and-private-apis.rst:59 msgid "" "You can see that ``flwr`` has six subpackages (``cli``, ``client``, " "``common``, ``proto``, ``server``, ``simulation``), but only four of them" @@ -96,7 +96,7 @@ msgid "" "``simulation``)." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:57 +#: ../../source/contributor-explanation-public-and-private-apis.rst:63 msgid "" "What does this mean? 
It means that ``client``, ``common``, ``server`` and" " ``simulation`` are part of the public API, but ``cli`` and ``proto`` are" @@ -107,21 +107,21 @@ msgid "" "even be removed completely." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:62 +#: ../../source/contributor-explanation-public-and-private-apis.rst:70 msgid "Therefore, as a Flower user:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:64 +#: ../../source/contributor-explanation-public-and-private-apis.rst:72 msgid "``from flwr import client`` ✅ Ok, you're importing a public API." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:65 +#: ../../source/contributor-explanation-public-and-private-apis.rst:73 msgid "" "``from flwr import proto`` ❌ Not recommended, you're importing a private " "API." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:67 +#: ../../source/contributor-explanation-public-and-private-apis.rst:75 msgid "" "What about components that are nested deeper in the hierarchy? Let's look" " at Flower strategies to see another typical pattern. Flower strategies " @@ -130,7 +130,7 @@ msgid "" "``src/py/flwr/server/strategy/__init__.py``:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:81 +#: ../../source/contributor-explanation-public-and-private-apis.rst:91 msgid "" "What's notable here is that all strategies are implemented in dedicated " "modules (e.g., ``fedavg.py``). In ``__init__.py``, we *import* the " @@ -142,33 +142,33 @@ msgid "" "the public API (as long as we update the import path in ``__init__.py``)." 
msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:86 +#: ../../source/contributor-explanation-public-and-private-apis.rst:99 msgid "Therefore:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:88 +#: ../../source/contributor-explanation-public-and-private-apis.rst:101 msgid "" "``from flwr.server.strategy import FedAvg`` ✅ Ok, you're importing a " "class that is part of the public API." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:89 +#: ../../source/contributor-explanation-public-and-private-apis.rst:103 msgid "" "``from flwr.server.strategy import fedavg`` ❌ Not recommended, you're " "importing a private module." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:91 +#: ../../source/contributor-explanation-public-and-private-apis.rst:106 msgid "" "This approach is also implemented in the tooling that automatically " "builds API reference docs." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:94 +#: ../../source/contributor-explanation-public-and-private-apis.rst:110 msgid "Flower public API of private packages" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:96 +#: ../../source/contributor-explanation-public-and-private-apis.rst:112 msgid "" "We also use this to define the public API of private subpackages. Public," " in this context, means the API that other ``flwr`` subpackages should " @@ -176,14 +176,14 @@ msgid "" "not exported via ``src/py/flwr/server/__init__.py``'s ``__all__``)." 
msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:100 +#: ../../source/contributor-explanation-public-and-private-apis.rst:117 msgid "" "Still, the private sub-package ``flwr.server.driver`` defines a " "\"public\" API using ``__all__`` in " "``src/py/flwr/server/driver/__init__.py``:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:114 +#: ../../source/contributor-explanation-public-and-private-apis.rst:132 msgid "" "The interesting part is that both ``GrpcDriver`` and ``InMemoryDriver`` " "are never used by Flower framework users, only by other parts of the " @@ -195,7 +195,7 @@ msgid "" "``InMemoryDriver`` class definition)." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:117 +#: ../../source/contributor-explanation-public-and-private-apis.rst:140 msgid "" "This is because ``flwr.server.driver`` defines a public interface for " "other ``flwr`` subpackages. This allows codeowners of " @@ -224,23 +224,23 @@ msgid "" "development environment." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:12 +#: ../../source/contributor-how-to-build-docker-images.rst:13 #, fuzzy msgid "Clone the ``flower`` repository." msgstr "**Fourche le dépôt de Flower**" -#: ../../source/contributor-how-to-build-docker-images.rst:18 +#: ../../source/contributor-how-to-build-docker-images.rst:19 msgid "Verify the Docker daemon is running." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:20 +#: ../../source/contributor-how-to-build-docker-images.rst:21 msgid "" "The build instructions that assemble the images are located in the " "respective Dockerfiles. You can find them in the subdirectories of " "``src/docker``." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:23 +#: ../../source/contributor-how-to-build-docker-images.rst:24 msgid "" "Flower Docker images are configured via build arguments. 
Through build " "arguments, we can make the creation of images more flexible. For example," @@ -251,149 +251,149 @@ msgid "" "below." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:30 +#: ../../source/contributor-how-to-build-docker-images.rst:32 #, fuzzy msgid "Building the Base Image" msgstr "Chargement des données" -#: ../../source/contributor-how-to-build-docker-images.rst:36 -#: ../../source/contributor-how-to-build-docker-images.rst:98 +#: ../../source/contributor-how-to-build-docker-images.rst:38 +#: ../../source/contributor-how-to-build-docker-images.rst:104 #, fuzzy msgid "Build argument" msgstr "Amélioration de la documentation" -#: ../../source/contributor-how-to-build-docker-images.rst:37 -#: ../../source/contributor-how-to-build-docker-images.rst:99 +#: ../../source/contributor-how-to-build-docker-images.rst:39 +#: ../../source/contributor-how-to-build-docker-images.rst:105 #, fuzzy msgid "Description" msgstr "Dépréciations" -#: ../../source/contributor-how-to-build-docker-images.rst:38 -#: ../../source/contributor-how-to-build-docker-images.rst:100 +#: ../../source/contributor-how-to-build-docker-images.rst:40 +#: ../../source/contributor-how-to-build-docker-images.rst:106 #, fuzzy msgid "Required" msgstr "Changements nécessaires" -#: ../../source/contributor-how-to-build-docker-images.rst:39 -#: ../../source/contributor-how-to-build-docker-images.rst:101 -#: ../../source/docker/persist-superlink-state.rst:18 -#: ../../source/docker/pin-version.rst:11 +#: ../../source/contributor-how-to-build-docker-images.rst:41 +#: ../../source/contributor-how-to-build-docker-images.rst:107 +#: ../../source/docker/persist-superlink-state.rst:19 +#: ../../source/docker/pin-version.rst:12 #: ../../source/docker/set-environment-variables.rst:8 #, fuzzy msgid "Example" msgstr "Exemples de PyTorch" -#: ../../source/contributor-how-to-build-docker-images.rst:40 +#: ../../source/contributor-how-to-build-docker-images.rst:42 msgid "``DISTRO``" msgstr 
"" -#: ../../source/contributor-how-to-build-docker-images.rst:41 +#: ../../source/contributor-how-to-build-docker-images.rst:43 #, fuzzy msgid "The Linux distribution to use as the base image." msgstr "Chargement des données" -#: ../../source/contributor-how-to-build-docker-images.rst:42 -#: ../../source/contributor-how-to-build-docker-images.rst:46 -#: ../../source/contributor-how-to-build-docker-images.rst:50 -#: ../../source/contributor-how-to-build-docker-images.rst:66 -#: ../../source/contributor-how-to-build-docker-images.rst:70 -#: ../../source/contributor-how-to-build-docker-images.rst:104 +#: ../../source/contributor-how-to-build-docker-images.rst:44 +#: ../../source/contributor-how-to-build-docker-images.rst:48 +#: ../../source/contributor-how-to-build-docker-images.rst:52 +#: ../../source/contributor-how-to-build-docker-images.rst:68 +#: ../../source/contributor-how-to-build-docker-images.rst:75 +#: ../../source/contributor-how-to-build-docker-images.rst:110 #, fuzzy msgid "No" msgstr "Aucun" -#: ../../source/contributor-how-to-build-docker-images.rst:43 +#: ../../source/contributor-how-to-build-docker-images.rst:45 msgid "``ubuntu``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:44 +#: ../../source/contributor-how-to-build-docker-images.rst:46 #, fuzzy msgid "``DISTRO_VERSION``" msgstr "Version Python" -#: ../../source/contributor-how-to-build-docker-images.rst:45 +#: ../../source/contributor-how-to-build-docker-images.rst:47 msgid "Version of the Linux distribution." 
msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:47 +#: ../../source/contributor-how-to-build-docker-images.rst:49 msgid ":substitution-code:`|ubuntu_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:48 +#: ../../source/contributor-how-to-build-docker-images.rst:50 #, fuzzy msgid "``PYTHON_VERSION``" msgstr "Version Python" -#: ../../source/contributor-how-to-build-docker-images.rst:49 +#: ../../source/contributor-how-to-build-docker-images.rst:51 msgid "Version of ``python`` to be installed." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:51 +#: ../../source/contributor-how-to-build-docker-images.rst:53 msgid "``3.11`` or ``3.11.1``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:52 +#: ../../source/contributor-how-to-build-docker-images.rst:54 msgid "``PIP_VERSION``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:53 +#: ../../source/contributor-how-to-build-docker-images.rst:55 msgid "Version of ``pip`` to be installed." 
msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:54 -#: ../../source/contributor-how-to-build-docker-images.rst:58 -#: ../../source/contributor-how-to-build-docker-images.rst:62 -#: ../../source/contributor-how-to-build-docker-images.rst:108 +#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:60 +#: ../../source/contributor-how-to-build-docker-images.rst:64 +#: ../../source/contributor-how-to-build-docker-images.rst:114 #, fuzzy msgid "Yes" msgstr "Types" -#: ../../source/contributor-how-to-build-docker-images.rst:55 +#: ../../source/contributor-how-to-build-docker-images.rst:57 msgid ":substitution-code:`|pip_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:58 msgid "``SETUPTOOLS_VERSION``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:57 +#: ../../source/contributor-how-to-build-docker-images.rst:59 msgid "Version of ``setuptools`` to be installed." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:59 +#: ../../source/contributor-how-to-build-docker-images.rst:61 msgid ":substitution-code:`|setuptools_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:60 +#: ../../source/contributor-how-to-build-docker-images.rst:62 msgid "``FLWR_VERSION``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:61 +#: ../../source/contributor-how-to-build-docker-images.rst:63 msgid "Version of Flower to be installed." 
msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:63 +#: ../../source/contributor-how-to-build-docker-images.rst:65 msgid ":substitution-code:`|stable_flwr_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:64 +#: ../../source/contributor-how-to-build-docker-images.rst:66 msgid "``FLWR_PACKAGE``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:65 +#: ../../source/contributor-how-to-build-docker-images.rst:67 msgid "The Flower package to be installed." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:67 +#: ../../source/contributor-how-to-build-docker-images.rst:69 msgid "``flwr`` or ``flwr-nightly``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:68 +#: ../../source/contributor-how-to-build-docker-images.rst:70 #, fuzzy msgid "``FLWR_VERSION_REF``" msgstr "Version Python" -#: ../../source/contributor-how-to-build-docker-images.rst:69 +#: ../../source/contributor-how-to-build-docker-images.rst:71 msgid "" "A `direct reference " "`_." msgstr "" -#: ../../source/contributor-how-to-contribute-translations.rst:29 +#: ../../source/contributor-how-to-contribute-translations.rst:28 msgid "" "Once you are signed in to Weblate, you can navigate to the `Flower " "Framework project `_." msgstr "" -#: ../../source/contributor-how-to-contribute-translations.rst:67 +#: ../../source/contributor-how-to-contribute-translations.rst:64 msgid "Add new languages" msgstr "" -#: ../../source/contributor-how-to-contribute-translations.rst:69 +#: ../../source/contributor-how-to-contribute-translations.rst:66 msgid "" "If you want to add a new language, you will first have to contact us, " "either on `Slack `_, or by opening an issue" @@ -598,17 +598,17 @@ msgstr "" "formater le code ou exécuter des tests. À cette fin, nous utilisons " "l'extension VSCode Remote Containers. Qu'est-ce que c'est ?" 
-#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:7 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:8 +#, fuzzy msgid "" "The Visual Studio Code Remote - Containers extension lets you use a " "Docker container as a fully-featured development environment. It allows " "you to open any folder inside (or mounted into) a container and take " "advantage of Visual Studio Code's full feature set. A " -":code:`devcontainer.json` file in your project tells VS Code how to " -"access (or create) a development container with a well-defined tool and " -"runtime stack. This container can be used to run an application or to " -"separate tools, libraries, or runtimes needed for working with a " -"codebase." +"``devcontainer.json`` file in your project tells VS Code how to access " +"(or create) a development container with a well-defined tool and runtime " +"stack. This container can be used to run an application or to separate " +"tools, libraries, or runtimes needed for working with a codebase." msgstr "" "L'extension Visual Studio Code Remote - Containers te permet d'utiliser " "un conteneur Docker comme environnement de développement complet. Elle te" @@ -621,7 +621,7 @@ msgstr "" " les outils, les bibliothèques ou les exécutions nécessaires pour " "travailler avec une base de code." -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:9 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:16 msgid "" "Workspace files are mounted from the local file system or copied or " "cloned into the container. Extensions are installed and run inside the " @@ -637,7 +637,7 @@ msgstr "" "environnement de développement simplement en te connectant à un autre " "conteneur." 
-#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:11 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:22 #, fuzzy msgid "" "Source: `Official VSCode documentation " @@ -646,19 +646,19 @@ msgstr "" "Source : `Documentation officielle de VSCode " "`_" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:15 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:26 msgid "Getting started" msgstr "Pour commencer" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:17 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:28 #, fuzzy msgid "" -"Configuring and setting up the :code:`Dockerfile` as well the " -"configuration for the devcontainer can be a bit more involved. The good " -"thing is you don't have to do it. Usually it should be enough to install " -"`Docker `_ on your system and " -"ensure its available on your command line. Additionally, install the " -"`VSCode Containers Extension `_ on your system and ensure its" +" available on your command line. Additionally, install the `VSCode " +"Containers Extension `_." msgstr "" "La configuration et le paramétrage du :code:`Dockerfile` ainsi que la " @@ -669,7 +669,7 @@ msgstr "" "`VSCode Containers Extension `_." -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:19 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:35 msgid "" "Now you should be good to go. When starting VSCode, it will ask you to " "run in the container environment and - if you confirm - automatically " @@ -686,7 +686,7 @@ msgstr "" "inférieur gauche de ta fenêtre VSCode et sélectionner l'option " "*(Re)Ouvrir le dossier dans le conteneur*." -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:21 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:41 msgid "" "In some cases your setup might be more involved. 
For those cases consult " "the following sources:" @@ -694,7 +694,7 @@ msgstr "" "Dans certains cas, ton installation peut être plus complexe. Pour ces " "cas-là, consulte les sources suivantes :" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:23 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:44 #, fuzzy msgid "" "`Developing inside a Container " @@ -705,7 +705,7 @@ msgstr "" "`_" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:24 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:46 #, fuzzy msgid "" "`Remote development in Containers " @@ -737,7 +737,7 @@ msgstr "" "supprimer ``poetry.lock`` (``rm poetry.lock``) avant d'exécuter ``poetry " "install``)." -#: ../../source/contributor-how-to-install-development-versions.rst:12 +#: ../../source/contributor-how-to-install-development-versions.rst:14 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (without " "extras)" @@ -745,7 +745,7 @@ msgstr "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (sans " "extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:13 +#: ../../source/contributor-how-to-install-development-versions.rst:15 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true, extras = " "[\"simulation\"] }`` (with extras)" @@ -753,7 +753,7 @@ msgstr "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true, extras = " "[\"simulation\"] }`` (avec extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:15 +#: ../../source/contributor-how-to-install-development-versions.rst:18 msgid "" "Install ``flwr`` from a local copy of the Flower source code via " "``pyproject.toml``:" @@ -761,11 +761,11 @@ msgstr "" "Installez ``flwr`` à partir d'une copie locale du code source de Flower " "via ``pyproject.toml`` :" -#: ../../source/contributor-how-to-install-development-versions.rst:17 +#: 
../../source/contributor-how-to-install-development-versions.rst:20 msgid "``flwr = { path = \"../../\", develop = true }`` (without extras)" msgstr "``flwr = { path = \"../../\", develop = true }`` (sans extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:18 +#: ../../source/contributor-how-to-install-development-versions.rst:21 msgid "" "``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " "}`` (with extras)" @@ -773,11 +773,11 @@ msgstr "" "``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " "}`` (avec extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:20 +#: ../../source/contributor-how-to-install-development-versions.rst:23 msgid "Install ``flwr`` from a local wheel file via ``pyproject.toml``:" msgstr "Installez ``flwr`` à partir d'un fichier local via ``pyproject.toml`` :" -#: ../../source/contributor-how-to-install-development-versions.rst:22 +#: ../../source/contributor-how-to-install-development-versions.rst:25 #, fuzzy msgid "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (without" @@ -786,7 +786,7 @@ msgstr "" "``flwr = { path = \"../../dist/flwr-1.0.0-py3-none-any.whl\" }`` (sans " "extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:23 +#: ../../source/contributor-how-to-install-development-versions.rst:26 #, fuzzy msgid "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\", extras = " @@ -795,7 +795,7 @@ msgstr "" "``flwr = { path = \"../../dist/flwr-1.0.0-py3-none-any.whl\", extras = " "[\"simulation\"] }`` (avec extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:25 +#: ../../source/contributor-how-to-install-development-versions.rst:29 msgid "" "Please refer to the Poetry documentation for further details: `Poetry " "Dependency Specification `_" -#: ../../source/contributor-how-to-install-development-versions.rst:28 +#: 
../../source/contributor-how-to-install-development-versions.rst:33 msgid "Using pip (recommended on Colab)" msgstr "Utiliser pip (recommandé sur Colab)" -#: ../../source/contributor-how-to-install-development-versions.rst:30 +#: ../../source/contributor-how-to-install-development-versions.rst:35 msgid "Install a ``flwr`` pre-release from PyPI:" msgstr "Installe une pré-version de ``flwr`` depuis PyPI :" -#: ../../source/contributor-how-to-install-development-versions.rst:32 +#: ../../source/contributor-how-to-install-development-versions.rst:37 msgid "``pip install -U --pre flwr`` (without extras)" msgstr "``pip install -U --pre flwr`` (sans les extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:33 +#: ../../source/contributor-how-to-install-development-versions.rst:38 msgid "``pip install -U --pre 'flwr[simulation]'`` (with extras)" msgstr "``pip install -U --pre 'flwr[simulation]'`` (avec les extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:35 +#: ../../source/contributor-how-to-install-development-versions.rst:40 msgid "" "Python packages can be installed from git repositories. Use one of the " "following commands to install the Flower directly from GitHub." @@ -830,11 +830,11 @@ msgstr "" "Utilise l'une des commandes suivantes pour installer Flower directement à" " partir de GitHub." 
-#: ../../source/contributor-how-to-install-development-versions.rst:37 +#: ../../source/contributor-how-to-install-development-versions.rst:43 msgid "Install ``flwr`` from the default GitHub branch (``main``):" msgstr "Installez ``flwr`` à partir de la branche GitHub par défaut (``main``) :" -#: ../../source/contributor-how-to-install-development-versions.rst:39 +#: ../../source/contributor-how-to-install-development-versions.rst:45 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git`` (without " "extras)" @@ -842,7 +842,7 @@ msgstr "" "``pip install flwr@git+https://github.com/adap/flower.git`` (sans les " "extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:40 +#: ../../source/contributor-how-to-install-development-versions.rst:46 msgid "" "``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" " (with extras)" @@ -850,13 +850,13 @@ msgstr "" "``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" " (avec les extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:42 +#: ../../source/contributor-how-to-install-development-versions.rst:49 msgid "Install ``flwr`` from a specific GitHub branch (``branch-name``):" msgstr "" "Installez ``flwr`` à partir d'une branche GitHub spécifique (``nom-" "branche``) :" -#: ../../source/contributor-how-to-install-development-versions.rst:44 +#: ../../source/contributor-how-to-install-development-versions.rst:51 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " "(without extras)" @@ -864,7 +864,7 @@ msgstr "" "``pip install flwr@git+https://github.com/adap/flower.git@nom-branche`` " "(sans les extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:45 +#: ../../source/contributor-how-to-install-development-versions.rst:53 #, fuzzy msgid "" "``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git" @@ -873,11 +873,11 @@ msgstr "" "``pip install 
'flwr[simulation]@git+https://github.com/adap/flower.git" "``@nom-de-la-branche'`` (avec des extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:49 +#: ../../source/contributor-how-to-install-development-versions.rst:57 msgid "Open Jupyter Notebooks on Google Colab" msgstr "Ouvre les carnets Jupyter sur Google Colab" -#: ../../source/contributor-how-to-install-development-versions.rst:51 +#: ../../source/contributor-how-to-install-development-versions.rst:59 #, fuzzy msgid "" "Open the notebook ``doc/source/tutorial-series-get-started-with-flower-" @@ -886,7 +886,7 @@ msgstr "" "Ouvrir le notebook ``doc/source/tutorial/Flower-1-Intro-to-FL-" "PyTorch.ipynb`` :" -#: ../../source/contributor-how-to-install-development-versions.rst:53 +#: ../../source/contributor-how-to-install-development-versions.rst:61 #, fuzzy msgid "" "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" @@ -895,7 +895,7 @@ msgstr "" "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" "/tutorial-get-started-with-flower-pytorch.ipynb" -#: ../../source/contributor-how-to-install-development-versions.rst:55 +#: ../../source/contributor-how-to-install-development-versions.rst:63 msgid "" "Open a development version of the same notebook from branch `branch-name`" " by changing ``main`` to ``branch-name`` (right after ``blob``):" @@ -904,7 +904,7 @@ msgstr "" "`nom-branche` en remplaçant `main` par `nom-branche` (juste après `blob`)" " :" -#: ../../source/contributor-how-to-install-development-versions.rst:57 +#: ../../source/contributor-how-to-install-development-versions.rst:66 #, fuzzy msgid "" "https://colab.research.google.com/github/adap/flower/blob/branch-" @@ -913,21 +913,21 @@ msgstr "" "https://colab.research.google.com/github/adap/flower/blob/branch-" "name/doc/source/tutorial-get-started-with-flower-pytorch.ipynb" -#: ../../source/contributor-how-to-install-development-versions.rst:59 +#: 
../../source/contributor-how-to-install-development-versions.rst:68 msgid "Install a `whl` on Google Colab:" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:61 +#: ../../source/contributor-how-to-install-development-versions.rst:70 msgid "" "In the vertical icon grid on the left hand side, select ``Files`` > " "``Upload to session storage``" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:62 +#: ../../source/contributor-how-to-install-development-versions.rst:72 msgid "Upload the whl (e.g., ``flwr-1.8.0-py3-none-any.whl``)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:63 +#: ../../source/contributor-how-to-install-development-versions.rst:73 msgid "" "Change ``!pip install -q 'flwr[simulation]' torch torchvision " "matplotlib`` to ``!pip install -q 'flwr-1.8.0-py3-none-" @@ -946,11 +946,11 @@ msgstr "" "Ce document décrit le processus de diffusion actuel, qui peut ou non " "changer à l'avenir." -#: ../../source/contributor-how-to-release-flower.rst:7 +#: ../../source/contributor-how-to-release-flower.rst:8 msgid "During the release" msgstr "Lors de la sortie" -#: ../../source/contributor-how-to-release-flower.rst:9 +#: ../../source/contributor-how-to-release-flower.rst:10 msgid "" "The version number of a release is stated in ``pyproject.toml``. To " "release a new version of Flower, the following things need to happen (in " @@ -960,14 +960,14 @@ msgstr "" "Pour publier une nouvelle version de Flower, les choses suivantes doivent" " se produire (dans cet ordre) :" -#: ../../source/contributor-how-to-release-flower.rst:11 +#: ../../source/contributor-how-to-release-flower.rst:13 msgid "" "Run ``python3 src/py/flwr_tool/update_changelog.py `` in " "order to add every new change to the changelog (feel free to make manual " "changes to the changelog afterwards until it looks good)." 
msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:12 +#: ../../source/contributor-how-to-release-flower.rst:16 msgid "" "Once the changelog has been updated with all the changes, run ``./dev" "/prepare-release-changelog.sh v``, where ```` " @@ -977,7 +977,7 @@ msgid "" "the contributors. Open a pull request with those changes." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:13 +#: ../../source/contributor-how-to-release-flower.rst:22 msgid "" "Once the pull request is merged, tag the release commit with the version " "number as soon as the PR is merged: ``git tag v`` (notice " @@ -986,33 +986,33 @@ msgid "" "artifacts and the relevant part of the changelog." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:14 +#: ../../source/contributor-how-to-release-flower.rst:26 msgid "Check the draft release on GitHub, and if everything is good, publish it." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:17 +#: ../../source/contributor-how-to-release-flower.rst:29 msgid "After the release" msgstr "Après la publication" -#: ../../source/contributor-how-to-release-flower.rst:19 +#: ../../source/contributor-how-to-release-flower.rst:31 msgid "Create a pull request which contains the following changes:" msgstr "Crée une demande de pull qui contient les modifications suivantes :" -#: ../../source/contributor-how-to-release-flower.rst:21 +#: ../../source/contributor-how-to-release-flower.rst:33 msgid "Increase the minor version in ``pyproject.toml`` by one." msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." -#: ../../source/contributor-how-to-release-flower.rst:22 +#: ../../source/contributor-how-to-release-flower.rst:34 msgid "Update all files which contain the current version number if necessary." msgstr "" "Mets à jour tous les fichiers qui contiennent le numéro de version actuel" " si nécessaire." 
-#: ../../source/contributor-how-to-release-flower.rst:23 +#: ../../source/contributor-how-to-release-flower.rst:35 msgid "Add a new ``Unreleased`` section in ``changelog.md``." msgstr "Ajoute une nouvelle section ``Unreleased`` dans ``changelog.md``." -#: ../../source/contributor-how-to-release-flower.rst:25 +#: ../../source/contributor-how-to-release-flower.rst:37 msgid "" "Merge the pull request on the same day (i.e., before a new nightly " "release gets published to PyPI)." @@ -1020,15 +1020,15 @@ msgstr "" "Fusionne la pull request le jour même (c'est-à-dire avant qu'une nouvelle" " version nightly ne soit publiée sur PyPI)." -#: ../../source/contributor-how-to-release-flower.rst:28 +#: ../../source/contributor-how-to-release-flower.rst:41 msgid "Publishing a pre-release" msgstr "Publier une pré-version" -#: ../../source/contributor-how-to-release-flower.rst:31 +#: ../../source/contributor-how-to-release-flower.rst:44 msgid "Pre-release naming" msgstr "Nom de la pré-version" -#: ../../source/contributor-how-to-release-flower.rst:33 +#: ../../source/contributor-how-to-release-flower.rst:46 msgid "" "PyPI supports pre-releases (alpha, beta, release candidate). 
Pre-releases" " MUST use one of the following naming patterns:" @@ -1037,39 +1037,39 @@ msgstr "" "Les préversions DOIVENT utiliser l'un des modèles de dénomination " "suivants :" -#: ../../source/contributor-how-to-release-flower.rst:35 +#: ../../source/contributor-how-to-release-flower.rst:49 msgid "Alpha: ``MAJOR.MINOR.PATCHaN``" msgstr "Alpha : ``MAJOR.MINOR.PATCHaN``" -#: ../../source/contributor-how-to-release-flower.rst:36 +#: ../../source/contributor-how-to-release-flower.rst:50 msgid "Beta: ``MAJOR.MINOR.PATCHbN``" msgstr "Bêta : ``MAJOR.MINOR.PATCHbN``" -#: ../../source/contributor-how-to-release-flower.rst:37 +#: ../../source/contributor-how-to-release-flower.rst:51 msgid "Release candidate (RC): ``MAJOR.MINOR.PATCHrcN``" msgstr "Candidat à la publication (RC) : ``MAJOR.MINOR.PATCHrcN``" -#: ../../source/contributor-how-to-release-flower.rst:39 +#: ../../source/contributor-how-to-release-flower.rst:53 msgid "Examples include:" msgstr "Voici quelques exemples :" -#: ../../source/contributor-how-to-release-flower.rst:41 +#: ../../source/contributor-how-to-release-flower.rst:55 msgid "``1.0.0a0``" msgstr "``1.0.0a0``" -#: ../../source/contributor-how-to-release-flower.rst:42 +#: ../../source/contributor-how-to-release-flower.rst:56 msgid "``1.0.0b0``" msgstr "``1.0.0b0``" -#: ../../source/contributor-how-to-release-flower.rst:43 +#: ../../source/contributor-how-to-release-flower.rst:57 msgid "``1.0.0rc0``" msgstr "``1.0.0rc0``" -#: ../../source/contributor-how-to-release-flower.rst:44 +#: ../../source/contributor-how-to-release-flower.rst:58 msgid "``1.0.0rc1``" msgstr "``1.0.0rc1``" -#: ../../source/contributor-how-to-release-flower.rst:46 +#: ../../source/contributor-how-to-release-flower.rst:60 msgid "" "This is in line with PEP-440 and the recommendations from the Python " "Packaging Authority (PyPA):" @@ -1077,11 +1077,11 @@ msgstr "" "Ceci est conforme au PEP-440 et aux recommandations de l'Autorité de " "l'emballage Python (PyPA) :" -#: 
../../source/contributor-how-to-release-flower.rst:49 +#: ../../source/contributor-how-to-release-flower.rst:63 msgid "`PEP-440 `_" msgstr "`PEP-440 `_" -#: ../../source/contributor-how-to-release-flower.rst:50 +#: ../../source/contributor-how-to-release-flower.rst:64 msgid "" "`PyPA Choosing a versioning scheme " "`_" -#: ../../source/contributor-how-to-release-flower.rst:52 +#: ../../source/contributor-how-to-release-flower.rst:67 msgid "" "Note that the approach defined by PyPA is not compatible with SemVer " "2.0.0 spec, for details consult the `Semantic Versioning Specification " @@ -1103,17 +1103,17 @@ msgstr "" "Versioning Specification `_ (en particulier le point 11 sur la préséance)." -#: ../../source/contributor-how-to-release-flower.rst:55 +#: ../../source/contributor-how-to-release-flower.rst:73 msgid "Pre-release classification" msgstr "Classification avant publication" -#: ../../source/contributor-how-to-release-flower.rst:57 +#: ../../source/contributor-how-to-release-flower.rst:75 msgid "Should the next pre-release be called alpha, beta, or release candidate?" msgstr "" "La prochaine préversion doit-elle être appelée alpha, bêta ou release " "candidate ?" 
-#: ../../source/contributor-how-to-release-flower.rst:59 +#: ../../source/contributor-how-to-release-flower.rst:77 msgid "" "RC: feature complete, no known issues (apart from issues that are " "classified as \"won't fix\" for the next stable release) - if no issues " @@ -1124,11 +1124,11 @@ msgstr "" "version stable) - si aucun problème n'apparaît, cette version deviendra " "la prochaine version stable" -#: ../../source/contributor-how-to-release-flower.rst:60 +#: ../../source/contributor-how-to-release-flower.rst:80 msgid "Beta: feature complete, allowed to have known issues" msgstr "Bêta : fonctionnalité complète, autorisée à avoir des problèmes connus" -#: ../../source/contributor-how-to-release-flower.rst:61 +#: ../../source/contributor-how-to-release-flower.rst:81 msgid "Alpha: not feature complete, allowed to have known issues" msgstr "" "Alpha : les fonctionnalités ne sont pas complètes, les problèmes connus " @@ -1151,12 +1151,12 @@ msgstr "" "Anaconda. Tu peux suivre les instructions ou choisir la configuration que" " tu préfères." -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:9 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:10 msgid "Python Version" msgstr "Version Python" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:11 -#: ../../source/how-to-install-flower.rst:8 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:12 +#: ../../source/how-to-install-flower.rst:7 #, fuzzy msgid "" "Flower requires at least `Python 3.9 `_, " @@ -1166,7 +1166,7 @@ msgstr "" "Flower nécessite `Python 3.9 `_ ou plus, " "nous recommandons `Python 3.10 `_." -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:14 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:17 msgid "" "Due to a known incompatibility with `ray " "`_, we currently recommend utilizing at " @@ -1174,12 +1174,12 @@ msgid "" "simulations." 
msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:19 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:22 #, fuzzy msgid "Virtualenv with Pyenv/Virtualenv" msgstr "Virtualenv avec Pyenv/Virtualenv" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:21 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:24 msgid "" "One of the recommended virtual environment is `pyenv " "`_/`virtualenv `_ pour plus de " "détails." -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:23 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 #, fuzzy msgid "" "Once Pyenv is set up, you can use it to install `Python Version 3.10 " @@ -1201,19 +1201,19 @@ msgstr "" "Une fois Pyenv mis en place, tu peux l'utiliser pour installer `Python " "Version 3.7 `_ ou supérieure :" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 msgid "Create the virtualenv with:" msgstr "Crée le virtualenv avec :" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:42 msgid "Activate the virtualenv by running the following command:" msgstr "Active la virtualenv en exécutant la commande suivante :" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:44 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:49 msgid "Virtualenv with Poetry" msgstr "Virtualenv avec Poetry" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:46 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:51 msgid "" "The Flower examples are based on `Poetry `_ to manage dependencies. After installing Poetry you " @@ -1223,7 +1223,7 @@ msgstr "" "poetry.org/docs/>`_ pour gérer les dépendances. 
Après l'installation de " "Poetry, il te suffit de créer un environnement virtuel avec :" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:52 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:58 msgid "" "If you open a new terminal you can activate the previously created " "virtual environment with the following command:" @@ -1231,15 +1231,16 @@ msgstr "" "Si tu ouvres un nouveau terminal, tu peux activer l'environnement virtuel" " précédemment créé avec la commande suivante :" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:60 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:66 msgid "Virtualenv with Anaconda" msgstr "Virtualenv avec Anaconda" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:62 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 +#, fuzzy msgid "" "If you prefer to use Anaconda for your virtual environment then install " "and setup the `conda `_ package. After setting it up you can " +"/user-guide/install/index.html>`_ package. After setting it up you can " "create a virtual environment with:" msgstr "" "Si tu préfères utiliser Anaconda pour ton environnement virtuel, installe" @@ -1248,15 +1249,15 @@ msgstr "" "guide/install/index.html>`_. Après l'avoir configuré, tu peux créer un " "environnement virtuel avec :" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 msgid "and activate the virtual environment with:" msgstr "et active l'environnement virtuel avec :" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:83 msgid "And then?" msgstr "Et ensuite ?" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:78 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:85 msgid "" "As soon as you created your virtual environment you clone one of the " "`Flower examples `_." 
@@ -1268,11 +1269,11 @@ msgstr "" msgid "Write documentation" msgstr "Rédiger de la documentation" -#: ../../source/contributor-how-to-write-documentation.rst:6 +#: ../../source/contributor-how-to-write-documentation.rst:5 msgid "Project layout" msgstr "Schéma du projet" -#: ../../source/contributor-how-to-write-documentation.rst:8 +#: ../../source/contributor-how-to-write-documentation.rst:7 msgid "" "The Flower documentation lives in the ``doc`` directory. The Sphinx-based" " documentation system supports both reStructuredText (``.rst`` files) and" @@ -1283,7 +1284,7 @@ msgstr "" "reStructuredText (fichiers `.rst`) et Markdown (fichiers `.md`)." #: ../../source/contributor-how-to-write-documentation.rst:10 -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:169 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:193 #, fuzzy msgid "" "Note that, in order to build the documentation locally (with ``poetry run" @@ -1295,20 +1296,20 @@ msgstr "" "make html``, comme décrit plus bas), `Pandoc " "_` doit être installé sur le système." 
-#: ../../source/contributor-how-to-write-documentation.rst:14 +#: ../../source/contributor-how-to-write-documentation.rst:15 msgid "Edit an existing page" msgstr "Modifier une page existante" -#: ../../source/contributor-how-to-write-documentation.rst:16 +#: ../../source/contributor-how-to-write-documentation.rst:17 msgid "Edit an existing ``.rst`` (or ``.md``) file under ``doc/source/``" msgstr "Modifier un fichier ``.rst`` (ou ``.md``) existant sous ``doc/source/``" -#: ../../source/contributor-how-to-write-documentation.rst:17 +#: ../../source/contributor-how-to-write-documentation.rst:18 #: ../../source/contributor-how-to-write-documentation.rst:27 msgid "Compile the docs: ``cd doc``, then ``poetry run make html``" msgstr "Compilez les documents : ``cd doc``, puis ``poetry run make html``" -#: ../../source/contributor-how-to-write-documentation.rst:18 +#: ../../source/contributor-how-to-write-documentation.rst:19 #: ../../source/contributor-how-to-write-documentation.rst:28 msgid "Open ``doc/build/html/index.html`` in the browser to check the result" msgstr "" @@ -1347,11 +1348,11 @@ msgstr "" "quelques recommandations sur les points de départ pour augmenter tes " "chances de voir ton PR accepté dans la base de code de Flower." -#: ../../source/contributor-ref-good-first-contributions.rst:11 +#: ../../source/contributor-ref-good-first-contributions.rst:9 msgid "Where to start" msgstr "Par où commencer" -#: ../../source/contributor-ref-good-first-contributions.rst:13 +#: ../../source/contributor-ref-good-first-contributions.rst:11 msgid "" "Until the Flower core library matures it will be easier to get PR's " "accepted if they only touch non-core areas of the codebase. Good " @@ -1362,25 +1363,25 @@ msgstr "" " non essentielles de la base de code. Les bons candidats pour commencer " "sont :" -#: ../../source/contributor-ref-good-first-contributions.rst:17 +#: ../../source/contributor-ref-good-first-contributions.rst:14 msgid "Documentation: What's missing? 
What could be expressed more clearly?" msgstr "" "Documentation : Qu'est-ce qui manque ? Qu'est-ce qui pourrait être " "exprimé plus clairement ?" -#: ../../source/contributor-ref-good-first-contributions.rst:18 +#: ../../source/contributor-ref-good-first-contributions.rst:15 msgid "Baselines: See below." msgstr "Références : voir ci-dessous." -#: ../../source/contributor-ref-good-first-contributions.rst:19 +#: ../../source/contributor-ref-good-first-contributions.rst:16 msgid "Examples: See below." msgstr "Exemples : voir ci-dessous." -#: ../../source/contributor-ref-good-first-contributions.rst:23 +#: ../../source/contributor-ref-good-first-contributions.rst:19 msgid "Request for Flower Baselines" msgstr "Demande pour une nouvelle Flower Baseline" -#: ../../source/contributor-ref-good-first-contributions.rst:25 +#: ../../source/contributor-ref-good-first-contributions.rst:21 #, fuzzy msgid "" "If you are not familiar with Flower Baselines, you should probably check-" @@ -1391,7 +1392,7 @@ msgstr "" "probablement consulter notre `guide de contribution pour les baselines " "`_." -#: ../../source/contributor-ref-good-first-contributions.rst:27 +#: ../../source/contributor-ref-good-first-contributions.rst:25 #, fuzzy msgid "" "You should then check out the open `issues " @@ -1406,7 +1407,7 @@ msgstr "" " laquelle tu aimerais travailler et qui n'a pas d'assignés, n'hésite pas " "à te l'attribuer et à commencer à travailler dessus !" -#: ../../source/contributor-ref-good-first-contributions.rst:31 +#: ../../source/contributor-ref-good-first-contributions.rst:30 msgid "" "Otherwise, if you don't find a baseline you'd like to work on, be sure to" " open a new issue with the baseline request template!" @@ -1459,12 +1460,13 @@ msgstr "" "protocole SecAgg peut être considéré comme un cas particulier du " "protocole SecAgg+." 
-#: ../../source/contributor-ref-secure-aggregation-protocols.rst:8 -msgid "The :code:`SecAgg+` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:9 +#, fuzzy +msgid "The ``SecAgg+`` abstraction" msgstr "L'abstraction :code:`SecAgg+`" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:10 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:11 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:163 msgid "" "In this implementation, each client will be assigned with a unique index " "(int) for secure aggregation, and thus many python dictionaries used have" @@ -1475,8 +1477,8 @@ msgstr "" "dictionnaires python utilisés ont des clés de type int plutôt que de type" " ClientProxy." -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:65 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:198 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:67 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:204 msgid "" "The Flower server will execute and process received results in the " "following order:" @@ -1484,11 +1486,12 @@ msgstr "" "Le serveur Flower exécutera et traitera les résultats reçus dans l'ordre " "suivant :" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:159 -msgid "The :code:`LightSecAgg` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +#, fuzzy +msgid "The ``LightSecAgg`` abstraction" msgstr "L'abstraction :code:`LightSecAgg`" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:271 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:277 msgid "Types" msgstr "Types" @@ -1505,7 +1508,7 @@ msgstr "" "de Flower mais qui n'ont pas l'habitude de contribuer à des projets " "GitHub." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:6 +#: ../../source/contributor-tutorial-contribute-on-github.rst:7 #, fuzzy msgid "" "If you're familiar with how contributing on GitHub works, you can " @@ -1518,15 +1521,15 @@ msgstr "" "contributors.html>`_ et des exemples de `bonnes premières contributions " "`_." -#: ../../source/contributor-tutorial-contribute-on-github.rst:10 +#: ../../source/contributor-tutorial-contribute-on-github.rst:12 msgid "Setting up the repository" msgstr "Mise en place du référentiel" -#: ../../source/contributor-tutorial-contribute-on-github.rst:21 +#: ../../source/contributor-tutorial-contribute-on-github.rst:29 msgid "**Create a GitHub account and setup Git**" msgstr "**Créer un compte GitHub et configurer Git**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:13 +#: ../../source/contributor-tutorial-contribute-on-github.rst:15 #, fuzzy msgid "" "Git is a distributed version control tool. This allows for an entire " @@ -1541,7 +1544,7 @@ msgstr "" "locale, tu peux suivre ce `guide `_ pour le mettre en place." -#: ../../source/contributor-tutorial-contribute-on-github.rst:16 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "" "GitHub, itself, is a code hosting platform for version control and " "collaboration. It allows for everyone to collaborate and work from " @@ -1551,7 +1554,7 @@ msgstr "" "contrôle des versions et la collaboration. Il permet à chacun de " "collaborer et de travailler de n'importe où sur des dépôts à distance." -#: ../../source/contributor-tutorial-contribute-on-github.rst:18 +#: ../../source/contributor-tutorial-contribute-on-github.rst:25 msgid "" "If you haven't already, you will need to create an account on `GitHub " "`_." @@ -1559,7 +1562,7 @@ msgstr "" "Si ce n'est pas déjà fait, tu devras créer un compte sur `GitHub " "`_." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:20 +#: ../../source/contributor-tutorial-contribute-on-github.rst:28 msgid "" "The idea behind the generic Git and GitHub workflow boils down to this: " "you download code from a remote repository on GitHub, make changes " @@ -1571,15 +1574,15 @@ msgstr "" " des modifications localement et tu en gardes une trace à l'aide de Git, " "puis tu télécharges ton nouvel historique à nouveau sur GitHub." -#: ../../source/contributor-tutorial-contribute-on-github.rst:32 +#: ../../source/contributor-tutorial-contribute-on-github.rst:42 msgid "**Forking the Flower repository**" msgstr "**Fourche le dépôt de Flower**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:24 +#: ../../source/contributor-tutorial-contribute-on-github.rst:32 #, fuzzy msgid "" "A fork is a personal copy of a GitHub repository. To create one for " -"Flower, you must navigate to ``_ (while " +"Flower, you must navigate to https://github.com/adap/flower (while " "connected to your GitHub account) and click the ``Fork`` button situated " "on the top right of the page." msgstr "" @@ -1588,7 +1591,7 @@ msgstr "" "étant connecté à ton compte GitHub) et cliquer sur le bouton ``Fork`` " "situé en haut à droite de la page." -#: ../../source/contributor-tutorial-contribute-on-github.rst:29 +#: ../../source/contributor-tutorial-contribute-on-github.rst:38 msgid "" "You can change the name if you want, but this is not necessary as this " "version of Flower will be yours and will sit inside your own account " @@ -1601,11 +1604,11 @@ msgstr "" " devrais voir dans le coin supérieur gauche que tu es en train de " "regarder ta propre version de Flower." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 msgid "**Cloning your forked repository**" msgstr "**Clonage de ton dépôt forké**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:35 +#: ../../source/contributor-tutorial-contribute-on-github.rst:45 msgid "" "The next step is to download the forked repository on your machine to be " "able to make changes to it. On your forked repository page, you should " @@ -1617,7 +1620,7 @@ msgstr "" "forké, tu dois d'abord cliquer sur le bouton ``Code`` à droite, ce qui te" " permettra de copier le lien HTTPS du dépôt." -#: ../../source/contributor-tutorial-contribute-on-github.rst:41 +#: ../../source/contributor-tutorial-contribute-on-github.rst:52 msgid "" "Once you copied the \\, you can open a terminal on your machine, " "navigate to the place you want to download the repository to and type:" @@ -1626,7 +1629,7 @@ msgstr "" "machine, naviguer jusqu'à l'endroit où tu veux télécharger le référentiel" " et taper :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 #, fuzzy msgid "" "This will create a ``flower/`` (or the name of your fork if you renamed " @@ -1635,15 +1638,15 @@ msgstr "" "Cela créera un dossier `flower/` (ou le nom de ta fourche si tu l'as " "renommée) dans le répertoire de travail actuel." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:66 +#: ../../source/contributor-tutorial-contribute-on-github.rst:78 msgid "**Add origin**" msgstr "**Ajouter l'origine**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:50 +#: ../../source/contributor-tutorial-contribute-on-github.rst:62 msgid "You can then go into the repository folder:" msgstr "Tu peux ensuite aller dans le dossier du référentiel :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:56 +#: ../../source/contributor-tutorial-contribute-on-github.rst:68 msgid "" "And here we will need to add an origin to our repository. The origin is " "the \\ of the remote fork repository. To obtain it, we can do as " @@ -1655,7 +1658,7 @@ msgstr "" "indiqué précédemment en allant sur notre dépôt fork sur notre compte " "GitHub et en copiant le lien." -#: ../../source/contributor-tutorial-contribute-on-github.rst:61 +#: ../../source/contributor-tutorial-contribute-on-github.rst:75 msgid "" "Once the \\ is copied, we can type the following command in our " "terminal:" @@ -1663,11 +1666,11 @@ msgstr "" "Une fois que le \\ est copié, nous pouvons taper la commande " "suivante dans notre terminal :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:90 +#: ../../source/contributor-tutorial-contribute-on-github.rst:102 msgid "**Add upstream**" msgstr "**Ajouter en amont**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:69 +#: ../../source/contributor-tutorial-contribute-on-github.rst:81 #, fuzzy msgid "" "Now we will add an upstream address to our repository. 
Still in the same " @@ -1677,13 +1680,13 @@ msgstr "" "Toujours dans le même directroy, nous devons exécuter la commande " "suivante :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:76 +#: ../../source/contributor-tutorial-contribute-on-github.rst:88 msgid "The following diagram visually explains what we did in the previous steps:" msgstr "" "Le schéma suivant explique visuellement ce que nous avons fait dans les " "étapes précédentes :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:80 +#: ../../source/contributor-tutorial-contribute-on-github.rst:92 msgid "" "The upstream is the GitHub remote address of the parent repository (in " "this case Flower), i.e. the one we eventually want to contribute to and " @@ -1697,7 +1700,7 @@ msgstr "" "simplement l'adresse distante GitHub du dépôt forké que nous avons créé, " "c'est-à-dire la copie (fork) dans notre propre compte." -#: ../../source/contributor-tutorial-contribute-on-github.rst:84 +#: ../../source/contributor-tutorial-contribute-on-github.rst:97 msgid "" "To make sure our local version of the fork is up-to-date with the latest " "changes from the Flower repository, we can execute the following command:" @@ -1706,11 +1709,11 @@ msgstr "" "dernières modifications du dépôt Flower, nous pouvons exécuter la " "commande suivante :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:93 +#: ../../source/contributor-tutorial-contribute-on-github.rst:105 msgid "Setting up the coding environment" msgstr "Mise en place de l'environnement de codage" -#: ../../source/contributor-tutorial-contribute-on-github.rst:95 +#: ../../source/contributor-tutorial-contribute-on-github.rst:107 #, fuzzy msgid "" "This can be achieved by following this :doc:`getting started guide for " @@ -1723,11 +1726,11 @@ msgstr "" "fois que tu es capable d'écrire du code et de le tester, tu peux enfin " "commencer à faire des changements !" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:100 +#: ../../source/contributor-tutorial-contribute-on-github.rst:113 msgid "Making changes" msgstr "Apporter des changements" -#: ../../source/contributor-tutorial-contribute-on-github.rst:102 +#: ../../source/contributor-tutorial-contribute-on-github.rst:115 msgid "" "Before making any changes make sure you are up-to-date with your " "repository:" @@ -1735,15 +1738,15 @@ msgstr "" "Avant de faire des changements, assure-toi que tu es à jour avec ton " "référentiel :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:108 +#: ../../source/contributor-tutorial-contribute-on-github.rst:121 msgid "And with Flower's repository:" msgstr "Et avec le référentiel de Flower :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:122 +#: ../../source/contributor-tutorial-contribute-on-github.rst:134 msgid "**Create a new branch**" msgstr "**Créer une nouvelle branche**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:115 +#: ../../source/contributor-tutorial-contribute-on-github.rst:128 msgid "" "To make the history cleaner and easier to work with, it is good practice " "to create a new branch for each feature/project that needs to be " @@ -1753,7 +1756,7 @@ msgstr "" "une bonne pratique de créer une nouvelle branche pour chaque " "fonctionnalité/projet qui doit être mis en œuvre." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:118 +#: ../../source/contributor-tutorial-contribute-on-github.rst:131 msgid "" "To do so, just run the following command inside the repository's " "directory:" @@ -1761,21 +1764,21 @@ msgstr "" "Pour ce faire, il suffit d'exécuter la commande suivante dans le " "répertoire du référentiel :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:136 msgid "**Make changes**" msgstr "**Apporter des modifications**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:137 msgid "Write great code and create wonderful changes using your favorite editor!" msgstr "" "Écris du bon code et crée de merveilleuses modifications à l'aide de ton " "éditeur préféré !" -#: ../../source/contributor-tutorial-contribute-on-github.rst:138 +#: ../../source/contributor-tutorial-contribute-on-github.rst:149 msgid "**Test and format your code**" msgstr "**Teste et mets en forme ton code**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:128 +#: ../../source/contributor-tutorial-contribute-on-github.rst:139 msgid "" "Don't forget to test and format your code! Otherwise your code won't be " "able to be merged into the Flower repository. This is done so the " @@ -1785,15 +1788,15 @@ msgstr "" "pourra pas être fusionné dans le dépôt Flower, et ce, afin que la base de" " code reste cohérente et facile à comprendre." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:131 +#: ../../source/contributor-tutorial-contribute-on-github.rst:143 msgid "To do so, we have written a few scripts that you can execute:" msgstr "Pour ce faire, nous avons écrit quelques scripts que tu peux exécuter :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:150 +#: ../../source/contributor-tutorial-contribute-on-github.rst:162 msgid "**Stage changes**" msgstr "**Indexer les changements**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:141 +#: ../../source/contributor-tutorial-contribute-on-github.rst:152 msgid "" "Before creating a commit that will update your history, you must specify " "to Git which files it needs to take into account." @@ -1801,48 +1804,51 @@ msgstr "" "Avant de créer un commit qui mettra à jour ton historique, tu dois " "spécifier à Git les fichiers qu'il doit prendre en compte." -#: ../../source/contributor-tutorial-contribute-on-github.rst:143 +#: ../../source/contributor-tutorial-contribute-on-github.rst:155 msgid "This can be done with:" msgstr "Cela peut se faire avec :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:149 +#: ../../source/contributor-tutorial-contribute-on-github.rst:161 +#, fuzzy msgid "" "To check which files have been modified compared to the last version " "(last commit) and to see which files are staged for commit, you can use " -"the :code:`git status` command." +"the ``git status`` command." msgstr "" "Pour vérifier quels fichiers ont été modifiés par rapport à la dernière " "version (last commit) et pour voir quels fichiers sont mis à disposition " "pour le commit, tu peux utiliser la commande :code:`git status`." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:160 +#: ../../source/contributor-tutorial-contribute-on-github.rst:173 msgid "**Commit changes**" msgstr "**Commit changes**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:153 +#: ../../source/contributor-tutorial-contribute-on-github.rst:165 +#, fuzzy msgid "" -"Once you have added all the files you wanted to commit using :code:`git " -"add`, you can finally create your commit using this command:" +"Once you have added all the files you wanted to commit using ``git add``," +" you can finally create your commit using this command:" msgstr "" "Une fois que tu as ajouté tous les fichiers que tu voulais livrer à " "l'aide de :code:`git add`, tu peux enfin créer ta livraison à l'aide de " "cette commande :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:159 +#: ../../source/contributor-tutorial-contribute-on-github.rst:172 +#, fuzzy msgid "" "The \\ is there to explain to others what the commit " "does. It should be written in an imperative style and be concise. An " -"example would be :code:`git commit -m \"Add images to README\"`." +"example would be ``git commit -m \"Add images to README\"``." msgstr "" "Le ``commit_message`` est là pour expliquer aux autres ce que fait le " "commit. Il doit être écrit dans un style impératif et être concis. Un " "exemple serait :code:`git commit -m \"Ajouter des images au README\"`." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:171 +#: ../../source/contributor-tutorial-contribute-on-github.rst:185 msgid "**Push the changes to the fork**" msgstr "**Pousser les changements vers le fork**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:163 +#: ../../source/contributor-tutorial-contribute-on-github.rst:176 msgid "" "Once we have committed our changes, we have effectively updated our local" " history, but GitHub has no way of knowing this unless we push our " @@ -1853,7 +1859,7 @@ msgstr "" "moyen de le savoir à moins que nous ne poussions nos modifications vers " "l'adresse distante de notre origine :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:170 +#: ../../source/contributor-tutorial-contribute-on-github.rst:184 msgid "" "Once this is done, you will see on the GitHub that your forked repo was " "updated with the changes you have made." @@ -1861,15 +1867,15 @@ msgstr "" "Une fois que c'est fait, tu verras sur GitHub que ton repo forké a été " "mis à jour avec les modifications que tu as apportées." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:174 +#: ../../source/contributor-tutorial-contribute-on-github.rst:188 msgid "Creating and merging a pull request (PR)" msgstr "Créer et fusionner une pull request (PR)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +#: ../../source/contributor-tutorial-contribute-on-github.rst:226 msgid "**Create the PR**" msgstr "**Créer le PR**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:177 +#: ../../source/contributor-tutorial-contribute-on-github.rst:191 msgid "" "Once you have pushed changes, on the GitHub webpage of your repository " "you should see the following message:" @@ -1877,12 +1883,12 @@ msgstr "" "Une fois que tu as poussé les modifications, sur la page web GitHub de " "ton dépôt, tu devrais voir le message suivant :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:181 +#: ../../source/contributor-tutorial-contribute-on-github.rst:196 #, fuzzy msgid "Otherwise you can always find this option in the ``Branches`` page." msgstr "Sinon, tu peux toujours trouver cette option dans la page `Branches`." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:183 +#: ../../source/contributor-tutorial-contribute-on-github.rst:198 #, fuzzy msgid "" "Once you click the ``Compare & pull request`` button, you should see " @@ -1891,13 +1897,13 @@ msgstr "" "Une fois que tu as cliqué sur le bouton `Compare & pull request`, tu " "devrais voir quelque chose de similaire à ceci :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:187 +#: ../../source/contributor-tutorial-contribute-on-github.rst:203 msgid "At the top you have an explanation of which branch will be merged where:" msgstr "" "En haut, tu as une explication de quelle branche sera fusionnée à quel " "endroit :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:191 +#: ../../source/contributor-tutorial-contribute-on-github.rst:207 msgid "" "In this example you can see that the request is to merge the branch " "``doc-fixes`` from my forked repository to branch ``main`` from the " @@ -1907,14 +1913,14 @@ msgstr "" "branche ``doc-fixes`` de mon dépôt forké à la branche ``main`` du dépôt " "Flower." -#: ../../source/contributor-tutorial-contribute-on-github.rst:193 +#: ../../source/contributor-tutorial-contribute-on-github.rst:210 msgid "" "The title should be changed to adhere to the :ref:`pr_title_format` " "guidelines, otherwise it won't be possible to merge the PR. So in this " "case, a correct title might be ``docs(framework:skip) Fix typos``." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:196 +#: ../../source/contributor-tutorial-contribute-on-github.rst:214 msgid "" "The input box in the middle is there for you to describe what your PR " "does and to link it to existing issues. We have placed comments (that " @@ -1926,11 +1932,11 @@ msgstr "" "commentaires (qui ne seront pas rendus une fois le PR ouvert) pour te " "guider tout au long du processus." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:199 +#: ../../source/contributor-tutorial-contribute-on-github.rst:218 msgid "It is important to follow the instructions described in comments." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:201 +#: ../../source/contributor-tutorial-contribute-on-github.rst:220 msgid "" "At the bottom you will find the button to open the PR. This will notify " "reviewers that a new PR has been opened and that they should look over it" @@ -1940,7 +1946,7 @@ msgstr "" "qui informera les réviseurs qu'un nouveau PR a été ouvert et qu'ils " "doivent le consulter pour le fusionner ou demander des modifications." -#: ../../source/contributor-tutorial-contribute-on-github.rst:204 +#: ../../source/contributor-tutorial-contribute-on-github.rst:224 msgid "" "If your PR is not yet ready for review, and you don't want to notify " "anyone, you have the option to create a draft pull request:" @@ -1949,11 +1955,11 @@ msgstr "" " personne, tu as la possibilité de créer un brouillon de demande de " "traction :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:230 msgid "**Making new changes**" msgstr "**Faire de nouveaux changements**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:229 msgid "" "Once the PR has been opened (as draft or not), you can still push new " "commits to it the same way we did before, by making changes to the branch" @@ -1963,11 +1969,11 @@ msgstr "" "toujours y pousser de nouveaux commits de la même manière qu'auparavant, " "en apportant des modifications à la branche associée au PR." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:231 +#: ../../source/contributor-tutorial-contribute-on-github.rst:253 msgid "**Review the PR**" msgstr "**Review the PR**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:212 +#: ../../source/contributor-tutorial-contribute-on-github.rst:233 msgid "" "Once the PR has been opened or once the draft PR has been marked as " "ready, a review from code owners will be automatically requested:" @@ -1976,7 +1982,7 @@ msgstr "" " étant prêt, une révision des propriétaires de code sera automatiquement " "demandée :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:216 +#: ../../source/contributor-tutorial-contribute-on-github.rst:238 msgid "" "Code owners will then look into the code, ask questions, request changes " "or validate the PR." @@ -1984,11 +1990,11 @@ msgstr "" "Les propriétaires du code vont alors se pencher sur le code, poser des " "questions, demander des modifications ou valider le RP." -#: ../../source/contributor-tutorial-contribute-on-github.rst:218 +#: ../../source/contributor-tutorial-contribute-on-github.rst:241 msgid "Merging will be blocked if there are ongoing requested changes." msgstr "La fusion sera bloquée s'il y a des changements demandés en cours." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:222 +#: ../../source/contributor-tutorial-contribute-on-github.rst:245 msgid "" "To resolve them, just push the necessary changes to the branch associated" " with the PR:" @@ -1996,11 +2002,11 @@ msgstr "" "Pour les résoudre, il suffit de pousser les changements nécessaires vers " "la branche associée au PR :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:226 +#: ../../source/contributor-tutorial-contribute-on-github.rst:250 msgid "And resolve the conversation:" msgstr "Et résous la conversation :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:230 +#: ../../source/contributor-tutorial-contribute-on-github.rst:254 msgid "" "Once all the conversations have been resolved, you can re-request a " "review." @@ -2008,11 +2014,11 @@ msgstr "" "Une fois que toutes les conversations ont été résolues, tu peux " "redemander un examen." -#: ../../source/contributor-tutorial-contribute-on-github.rst:251 +#: ../../source/contributor-tutorial-contribute-on-github.rst:274 msgid "**Once the PR is merged**" msgstr "**Une fois que le PR est fusionné**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:234 +#: ../../source/contributor-tutorial-contribute-on-github.rst:256 msgid "" "If all the automatic tests have passed and reviewers have no more changes" " to request, they can approve the PR and merge it." @@ -2021,7 +2027,7 @@ msgstr "" " de modifications à demander, ils peuvent approuver le PR et le " "fusionner." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:238 +#: ../../source/contributor-tutorial-contribute-on-github.rst:261 msgid "" "Once it is merged, you can delete the branch on GitHub (a button should " "appear to do so) and also delete it locally by doing:" @@ -2030,19 +2036,19 @@ msgstr "" "(un bouton devrait apparaître pour le faire) et aussi la supprimer " "localement en faisant :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:245 +#: ../../source/contributor-tutorial-contribute-on-github.rst:269 msgid "Then you should update your forked repository by doing:" msgstr "Ensuite, tu dois mettre à jour ton dépôt forké en faisant :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:254 +#: ../../source/contributor-tutorial-contribute-on-github.rst:277 msgid "Example of first contribution" msgstr "Exemple de première contribution" -#: ../../source/contributor-tutorial-contribute-on-github.rst:257 +#: ../../source/contributor-tutorial-contribute-on-github.rst:280 msgid "Problem" msgstr "Problème" -#: ../../source/contributor-tutorial-contribute-on-github.rst:259 +#: ../../source/contributor-tutorial-contribute-on-github.rst:282 #, fuzzy msgid "" "For our documentation, we've started to use the `Diàtaxis framework " @@ -2051,7 +2057,7 @@ msgstr "" "Pour notre documentation, nous avons commencé à utiliser le cadre " "`Diàtaxis `_." -#: ../../source/contributor-tutorial-contribute-on-github.rst:261 +#: ../../source/contributor-tutorial-contribute-on-github.rst:285 #, fuzzy msgid "" "Our \"How to\" guides should have titles that continue the sentence \"How" @@ -2061,7 +2067,7 @@ msgstr "" "la phrase \"Comment faire pour...\", par exemple, \"Comment passer à " "Flower 1.0\"." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:263 +#: ../../source/contributor-tutorial-contribute-on-github.rst:288 msgid "" "Most of our guides do not follow this new format yet, and changing their " "title is (unfortunately) more involved than one might think." @@ -2070,7 +2076,7 @@ msgstr "" "changer leur titre est (malheureusement) plus compliqué qu'on ne le " "pense." -#: ../../source/contributor-tutorial-contribute-on-github.rst:265 +#: ../../source/contributor-tutorial-contribute-on-github.rst:291 #, fuzzy msgid "" "This issue is about changing the title of a doc from present continuous " @@ -2079,7 +2085,7 @@ msgstr "" "Cette question porte sur le changement du titre d'un document du présent " "continu au présent simple." -#: ../../source/contributor-tutorial-contribute-on-github.rst:267 +#: ../../source/contributor-tutorial-contribute-on-github.rst:294 #, fuzzy msgid "" "Let's take the example of \"Saving Progress\" which we changed to \"Save " @@ -2089,21 +2095,21 @@ msgstr "" "remplacé par \"Sauvegarder la progression\". Est-ce que cela passe notre " "contrôle ?" -#: ../../source/contributor-tutorial-contribute-on-github.rst:269 +#: ../../source/contributor-tutorial-contribute-on-github.rst:297 #, fuzzy msgid "Before: \"How to saving progress\" ❌" msgstr "Avant : \"Comment sauvegarder les progrès\" ❌" -#: ../../source/contributor-tutorial-contribute-on-github.rst:271 +#: ../../source/contributor-tutorial-contribute-on-github.rst:299 #, fuzzy msgid "After: \"How to save progress\" ✅" msgstr "Après : \"Comment sauvegarder la progression\" ✅" -#: ../../source/contributor-tutorial-contribute-on-github.rst:274 +#: ../../source/contributor-tutorial-contribute-on-github.rst:302 msgid "Solution" msgstr "Solution" -#: ../../source/contributor-tutorial-contribute-on-github.rst:276 +#: ../../source/contributor-tutorial-contribute-on-github.rst:304 #, fuzzy msgid "" "This is a tiny change, but it'll allow us to test your end-to-end setup. 
" @@ -2113,12 +2119,12 @@ msgstr "" "configuration de bout en bout. Après avoir cloné et configuré le repo " "Flower, voici ce que tu dois faire :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:278 +#: ../../source/contributor-tutorial-contribute-on-github.rst:307 #, fuzzy msgid "Find the source file in ``doc/source``" msgstr "Trouve le fichier source dans `doc/source`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#: ../../source/contributor-tutorial-contribute-on-github.rst:308 #, fuzzy msgid "" "Make the change in the ``.rst`` file (beware, the dashes under the title " @@ -2127,7 +2133,7 @@ msgstr "" "Effectue la modification dans le fichier `.rst` (attention, les tirets " "sous le titre doivent être de la même longueur que le titre lui-même)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:280 +#: ../../source/contributor-tutorial-contribute-on-github.rst:310 #, fuzzy msgid "" "Build the docs and `check the result `_" -#: ../../source/contributor-tutorial-contribute-on-github.rst:283 +#: ../../source/contributor-tutorial-contribute-on-github.rst:314 msgid "Rename file" msgstr "Renommer le fichier" -#: ../../source/contributor-tutorial-contribute-on-github.rst:285 +#: ../../source/contributor-tutorial-contribute-on-github.rst:316 msgid "" "You might have noticed that the file name still reflects the old wording." " If we just change the file, then we break all existing links to it - it " @@ -2153,22 +2159,22 @@ msgstr "" "important** d'éviter cela, car briser des liens peut nuire à notre " "classement dans les moteurs de recherche." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:288 +#: ../../source/contributor-tutorial-contribute-on-github.rst:320 #, fuzzy msgid "Here's how to change the file name:" msgstr "Voici comment changer le nom du fichier :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +#: ../../source/contributor-tutorial-contribute-on-github.rst:322 #, fuzzy msgid "Change the file name to ``save-progress.rst``" msgstr "Change le nom du fichier en `save-progress.rst`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:291 +#: ../../source/contributor-tutorial-contribute-on-github.rst:323 #, fuzzy msgid "Add a redirect rule to ``doc/source/conf.py``" msgstr "Ajouter une règle de redirection à `doc/source/conf.py`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:293 +#: ../../source/contributor-tutorial-contribute-on-github.rst:325 #, fuzzy msgid "" "This will cause a redirect from ``saving-progress.html`` to ``save-" @@ -2177,11 +2183,11 @@ msgstr "" "Cela entraînera une redirection de `saving-progress.html` vers `save-" "progress.html`, les anciens liens continueront à fonctionner." -#: ../../source/contributor-tutorial-contribute-on-github.rst:296 +#: ../../source/contributor-tutorial-contribute-on-github.rst:329 msgid "Apply changes in the index file" msgstr "Applique les changements dans le fichier d'index" -#: ../../source/contributor-tutorial-contribute-on-github.rst:298 +#: ../../source/contributor-tutorial-contribute-on-github.rst:331 #, fuzzy msgid "" "For the lateral navigation bar to work properly, it is very important to " @@ -2192,16 +2198,16 @@ msgstr "" "très important de mettre également à jour le fichier `index.rst`. C'est " "là que nous définissons toute l'arborescence de la barre de navigation." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:301 +#: ../../source/contributor-tutorial-contribute-on-github.rst:335 #, fuzzy msgid "Find and modify the file name in ``index.rst``" msgstr "Trouve et modifie le nom du fichier dans `index.rst`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:304 +#: ../../source/contributor-tutorial-contribute-on-github.rst:338 msgid "Open PR" msgstr "Open PR" -#: ../../source/contributor-tutorial-contribute-on-github.rst:306 +#: ../../source/contributor-tutorial-contribute-on-github.rst:340 #, fuzzy msgid "" "Commit the changes (commit messages are always imperative: \"Do " @@ -2210,27 +2216,27 @@ msgstr "" "Valide les modifications (les messages de validation sont toujours " "impératifs : \"Fais quelque chose\", dans ce cas \"Modifie...\")" -#: ../../source/contributor-tutorial-contribute-on-github.rst:307 +#: ../../source/contributor-tutorial-contribute-on-github.rst:342 msgid "Push the changes to your fork" msgstr "Pousse les changements vers ton fork" -#: ../../source/contributor-tutorial-contribute-on-github.rst:308 +#: ../../source/contributor-tutorial-contribute-on-github.rst:343 msgid "" "Open a PR (as shown above) with title ``docs(framework) Update how-to " "guide title``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:309 +#: ../../source/contributor-tutorial-contribute-on-github.rst:344 msgid "Wait for it to be approved!" msgstr "Attends qu'elle soit approuvée !" -#: ../../source/contributor-tutorial-contribute-on-github.rst:310 +#: ../../source/contributor-tutorial-contribute-on-github.rst:345 msgid "Congrats! 🥳 You're now officially a Flower contributor!" msgstr "" "Félicitations 🥳 Tu es désormais officiellement une contributrice de " "Flower !" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:314 +#: ../../source/contributor-tutorial-contribute-on-github.rst:348 #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:573 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1012 #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:811 @@ -2239,7 +2245,7 @@ msgstr "" msgid "Next steps" msgstr "Prochaines étapes" -#: ../../source/contributor-tutorial-contribute-on-github.rst:316 +#: ../../source/contributor-tutorial-contribute-on-github.rst:350 msgid "" "Once you have made your first PR, and want to contribute more, be sure to" " check out the following :" @@ -2247,37 +2253,37 @@ msgstr "" "Une fois que tu auras fait ton premier RP, et que tu voudras contribuer " "davantage, ne manque pas de consulter les sites suivants :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:318 +#: ../../source/contributor-tutorial-contribute-on-github.rst:353 #, fuzzy msgid "" ":doc:`Good first contributions `, where you should particularly look into the " -":code:`baselines` contributions." +"``baselines`` contributions." msgstr "" "`Bonnes premières contributions `_, où vous devriez " "particulièrement regarder les contributions :code:`baselines`." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:322 +#: ../../source/contributor-tutorial-contribute-on-github.rst:357 #: ../../source/fed/0000-20200102-fed-template.md:60 msgid "Appendix" msgstr "Annexe" -#: ../../source/contributor-tutorial-contribute-on-github.rst:327 +#: ../../source/contributor-tutorial-contribute-on-github.rst:362 msgid "PR title format" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:329 +#: ../../source/contributor-tutorial-contribute-on-github.rst:364 msgid "We enforce the following PR title format:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:335 +#: ../../source/contributor-tutorial-contribute-on-github.rst:370 msgid "" "(or ``(:skip) `` to ignore the PR in the " "changelog)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:337 +#: ../../source/contributor-tutorial-contribute-on-github.rst:372 msgid "" "Where ```` needs to be in ``{ci, fix, feat, docs, refactor, " "break}``, ```` should be in ``{framework, baselines, datasets, " @@ -2286,51 +2292,51 @@ msgid "" "verb in the imperative mood." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:341 +#: ../../source/contributor-tutorial-contribute-on-github.rst:377 #, fuzzy msgid "Valid examples:" msgstr "Exemples de PyTorch" -#: ../../source/contributor-tutorial-contribute-on-github.rst:343 +#: ../../source/contributor-tutorial-contribute-on-github.rst:379 msgid "``feat(framework) Add flwr build CLI command``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:344 +#: ../../source/contributor-tutorial-contribute-on-github.rst:380 msgid "``refactor(examples:skip) Improve quickstart-pytorch logging``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:345 +#: ../../source/contributor-tutorial-contribute-on-github.rst:381 msgid "``ci(*:skip) Enforce PR title format``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:347 +#: ../../source/contributor-tutorial-contribute-on-github.rst:383 #, fuzzy msgid "Invalid examples:" msgstr "Exemples de PyTorch" -#: ../../source/contributor-tutorial-contribute-on-github.rst:349 +#: ../../source/contributor-tutorial-contribute-on-github.rst:385 msgid "``feat(framework): Add flwr build CLI command`` (extra ``:``)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:350 +#: ../../source/contributor-tutorial-contribute-on-github.rst:386 msgid "" "``feat(*) Add flwr build CLI command`` (missing ``skip`` flag along with " "``*``)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:351 +#: ../../source/contributor-tutorial-contribute-on-github.rst:387 msgid "``feat(skip) Add flwr build CLI command`` (missing ````)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:352 +#: ../../source/contributor-tutorial-contribute-on-github.rst:388 msgid "``feat(framework) add flwr build CLI command`` (non capitalised verb)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:353 +#: 
../../source/contributor-tutorial-contribute-on-github.rst:389 msgid "``feat(framework) Add flwr build CLI command.`` (dot at the end)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:354 +#: ../../source/contributor-tutorial-contribute-on-github.rst:390 msgid "``Add flwr build CLI command.`` (missing ``()``)" msgstr "" @@ -2340,8 +2346,9 @@ msgstr "Devenez un·e contributeur·ice" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 #: ../../source/docker/run-as-subprocess.rst:11 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:15 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:12 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:16 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:13 #: ../../source/docker/tutorial-quickstart-docker.rst:11 msgid "Prerequisites" msgstr "Prérequis" @@ -2368,7 +2375,7 @@ msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:12 #, fuzzy msgid "" -"Flower uses :code:`pyproject.toml` to manage dependencies and configure " +"Flower uses ``pyproject.toml`` to manage dependencies and configure " "development tools (the ones which support it). Poetry is a build tool " "which supports `PEP 517 `_." msgstr "" @@ -2377,11 +2384,11 @@ msgstr "" "le supportent). Poetry est un outil qui support `PEP 517 " "`_." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:18 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:17 msgid "Developer Machine Setup" msgstr "Setup de la machine" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:21 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:20 #, fuzzy msgid "Preliminaries" msgstr "Principes" @@ -2400,113 +2407,113 @@ msgid "" "installation actions to add `brew` to your PATH." 
msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:28 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:29 msgid "" "Install `xz` (to install different Python versions) and `pandoc` to build" -" the docs::" +" the docs:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:34 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:36 msgid "For Ubuntu" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:35 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:38 msgid "" "Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all " -"necessary packages::" +"necessary packages:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:44 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:47 #, fuzzy msgid "Create Flower Dev Environment" msgstr "Créer/Supprimer l'environment virtuel" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:46 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:49 #, fuzzy msgid "" -"1. Clone the `Flower repository `_ from " -"GitHub::" +"Clone the `Flower repository `_ from " +"GitHub:" msgstr "" "Pour commencer, cloner la `repo Flower `_" " depuis GitHub::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:52 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:56 msgid "" "Let's create the Python environment for all-things Flower. If you wish to" -" use :code:`pyenv`, we provide two convenience scripts that you can use. " -"If you prefer using something else than :code:`pyenv`, create a new " -"environment, activate and skip to the last point where all packages are " -"installed." +" use ``pyenv``, we provide two convenience scripts that you can use. 
If " +"you prefer using something else than ``pyenv``, create a new environment," +" activate and skip to the last point where all packages are installed." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:54 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:61 #, fuzzy msgid "" -"If you don't have :code:`pyenv` installed, the following script that will" -" install it, set it up, and create the virtual environment (with " -":code:`Python 3.9.20` by default)::" +"If you don't have ``pyenv`` installed, the following script that will " +"install it, set it up, and create the virtual environment (with ``Python " +"3.9.20`` by default):" msgstr "" "Si vous n'avez pas :code:`pyenv` installé, vous pouvez utiliser le script" " suivant qui l'installera, le configurera et créera l'environnement " "virtuel (avec :code:`Python 3.9.20` par défaut)::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:58 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:68 #, fuzzy msgid "" -"If you already have :code:`pyenv` installed (along with the :code:`pyenv-" -"virtualenv` plugin), you can use the following convenience script (with " -":code:`Python 3.9.20` by default)::" +"If you already have ``pyenv`` installed (along with the ``pyenv-" +"virtualenv`` plugin), you can use the following convenience script (with " +"``Python 3.9.20`` by default):" msgstr "" "Si vous n'avez pas :code:`pyenv` installé, vous pouvez utiliser le script" " suivant qui l'installera, le configurera et créera l'environnement " "virtuel (avec :code:`Python 3.9.20` par défaut)::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:62 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:75 #, fuzzy msgid "" -"3. Install the Flower package in development mode (think :code:`pip " -"install -e`) along with all necessary dependencies::" +"3. 
Install the Flower package in development mode (think ``pip install " +"-e``) along with all necessary dependencies:" msgstr "" "Troisièmement, installez le paquet Flower en mode de développement ( " ":code :`pip install -e`) avec toutes les dépendances nécessaires :" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:83 msgid "Convenience Scripts" msgstr "Scripts pratiques" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:71 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 +#, fuzzy msgid "" "The Flower repository contains a number of convenience scripts to make " -"recurring development tasks easier and less error-prone. See the " -":code:`/dev` subdirectory for a full list. The following scripts are " -"amongst the most important ones:" +"recurring development tasks easier and less error-prone. See the ``/dev``" +" subdirectory for a full list. The following scripts are amongst the most" +" important ones:" msgstr "" "La repo de Flower contient un certain nombre de scripts de commodité pour" " rendre les tâches de développement récurrentes plus faciles et moins " "problématiques. Voir le sous-répertoire :code :`/dev` pour une liste " "complète. 
Les scripts suivants sont parmi les plus importants :" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:90 msgid "Create/Delete Virtual Environment" msgstr "Créer/Supprimer l'environnement virtuel" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:98 msgid "Compile ProtoBuf Definitions" msgstr "Compiler les définitions ProtoBuf" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:105 msgid "Auto-Format Code" msgstr "Formater le code" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:99 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:112 msgid "Run Linters and Tests" msgstr "Vérifier le format et tester le code" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:106 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119 msgid "Add a pre-commit hook" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:121 msgid "" "Developers may integrate a pre-commit hook into their workflow utilizing " "the `pre-commit `_ library. The pre-" @@ -2514,50 +2521,50 @@ msgid "" "``./dev/format.sh`` and ``./dev/test.sh`` scripts." 
msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:110 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:125 msgid "There are multiple ways developers can use this:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:112 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:127 msgid "Install the pre-commit hook to your local git directory by simply running:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:118 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:133 msgid "" "Each ``git commit`` will trigger the execution of formatting and " "linting/test scripts." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:135 msgid "" "If in a hurry, bypass the hook using ``--no-verify`` with the ``git " -"commit`` command. ::" +"commit`` command." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142 msgid "" "For developers who prefer not to install the hook permanently, it is " "possible to execute a one-time check prior to committing changes by using" " the following command:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:149 msgid "" "This executes the formatting and linting checks/tests on all the files " "without modifying the default behavior of ``git commit``." 
msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:133 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:153 msgid "Run Github Actions (CI) locally" msgstr "Exécuter les GitHub Actions (CI) localement" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:135 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:155 #, fuzzy msgid "" "Developers could run the full set of Github Actions workflows under their" " local environment by using `Act `_. " "Please refer to the installation instructions under the linked repository" -" and run the next command under Flower main cloned repository folder::" +" and run the next command under Flower main cloned repository folder:" msgstr "" "Il est possible d'exécuter l'ensemble des Github Actions sous leur " "environnement local en utilisant `Act `_." @@ -2565,7 +2572,7 @@ msgstr "" "fois installé, exécuter la commande suivante dans le dossier principal " "de Flower :" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:164 msgid "" "The Flower default workflow would run by setting up the required Docker " "machines underneath." @@ -2573,40 +2580,42 @@ msgstr "" "Le workflow par défaut de Flower sera exécuté en configurant les machines" " Docker requises en arrière-plan." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:147 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:168 #, fuzzy msgid "Build Release" msgstr "Inédit" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:149 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:170 +#, fuzzy msgid "" "Flower uses Poetry to build releases. The necessary command is wrapped in" -" a simple script::" +" a simple script:" msgstr "" "Flower utilise Poetry pour construire les nouvelles versions. 
La commande" " nécessaire est comprise dans un script simple ::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:154 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:177 +#, fuzzy msgid "" -"The resulting :code:`.whl` and :code:`.tar.gz` releases will be stored in" -" the :code:`/dist` subdirectory." +"The resulting ``.whl`` and ``.tar.gz`` releases will be stored in the " +"``/dist`` subdirectory." msgstr "" "Les versions résultantes :code:`.whl` et :code:`.tar.gz` seront stockées " "dans le sous-répertoire :code:`/dist`." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:159 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:181 #, fuzzy msgid "Build Documentation" msgstr "Amélioration de la documentation" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:161 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:183 msgid "" "Flower's documentation uses `Sphinx `_. " "There's no convenience script to re-build the documentation yet, but it's" -" pretty easy::" +" pretty easy:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:191 msgid "This will generate HTML documentation in ``doc/build/html``." 
msgstr "" @@ -2650,7 +2659,7 @@ msgid "" msgstr "" #: ../../source/docker/enable-tls.rst:23 -#: ../../source/docker/persist-superlink-state.rst:14 +#: ../../source/docker/persist-superlink-state.rst:15 msgid "" "If you later want to delete the directory, you can change the user ID " "back to the current user ID by running ``sudo chown -R $USER:$(id -gn) " @@ -2674,21 +2683,21 @@ msgstr "" msgid "Understanding the command" msgstr "Entraîne le modèle" -#: ../../source/docker/enable-tls.rst:44 ../../source/docker/enable-tls.rst:91 +#: ../../source/docker/enable-tls.rst:45 ../../source/docker/enable-tls.rst:92 #: ../../source/docker/enable-tls.rst:125 #: ../../source/docker/tutorial-quickstart-docker.rst:66 #: ../../source/docker/tutorial-quickstart-docker.rst:103 -#: ../../source/docker/tutorial-quickstart-docker.rst:213 -#: ../../source/docker/tutorial-quickstart-docker.rst:300 +#: ../../source/docker/tutorial-quickstart-docker.rst:217 +#: ../../source/docker/tutorial-quickstart-docker.rst:305 msgid "``docker run``: This tells Docker to run a container from an image." msgstr "" -#: ../../source/docker/enable-tls.rst:45 ../../source/docker/enable-tls.rst:92 +#: ../../source/docker/enable-tls.rst:46 ../../source/docker/enable-tls.rst:93 #: ../../source/docker/enable-tls.rst:126 #: ../../source/docker/tutorial-quickstart-docker.rst:67 #: ../../source/docker/tutorial-quickstart-docker.rst:104 -#: ../../source/docker/tutorial-quickstart-docker.rst:214 -#: ../../source/docker/tutorial-quickstart-docker.rst:301 +#: ../../source/docker/tutorial-quickstart-docker.rst:218 +#: ../../source/docker/tutorial-quickstart-docker.rst:306 msgid "``--rm``: Remove the container once it is stopped or the command exits." msgstr "" @@ -2793,19 +2802,19 @@ msgstr "" msgid "the network." 
msgstr "" -#: ../../source/docker/enable-tls.rst:71 +#: ../../source/docker/enable-tls.rst:72 #, fuzzy msgid "SuperNode" msgstr "flower-superlink" -#: ../../source/docker/enable-tls.rst:73 +#: ../../source/docker/enable-tls.rst:74 msgid "" "Assuming that the ``ca.crt`` certificate already exists locally, we can " "use the flag ``--volume`` to mount the local certificate into the " "container's ``/app/`` directory." msgstr "" -#: ../../source/docker/enable-tls.rst:78 +#: ../../source/docker/enable-tls.rst:79 msgid "" "If you're generating self-signed certificates and the ``ca.crt`` " "certificate doesn't exist on the SuperNode, you can copy it over after " @@ -2920,16 +2929,16 @@ msgstr "" msgid "Getting Started" msgstr "Pour commencer" -#: ../../source/docker/index.rst:20 +#: ../../source/docker/index.rst:19 msgid "Running in Production" msgstr "" -#: ../../source/docker/index.rst:29 +#: ../../source/docker/index.rst:28 #, fuzzy msgid "Advanced Options" msgstr "Options d'installation avancées" -#: ../../source/docker/index.rst:41 +#: ../../source/docker/index.rst:40 #, fuzzy msgid "Run Flower using Docker Compose" msgstr "Serveur de Flower" @@ -2952,7 +2961,7 @@ msgid "" " on your host system and a name for the database file." msgstr "" -#: ../../source/docker/persist-superlink-state.rst:10 +#: ../../source/docker/persist-superlink-state.rst:11 msgid "" "By default, the SuperLink container runs with a non-root user called " "``app`` with the user ID ``49999``. It is recommended to create a new " @@ -2960,7 +2969,7 @@ msgid "" "the mounted directory has the proper permissions." msgstr "" -#: ../../source/docker/persist-superlink-state.rst:20 +#: ../../source/docker/persist-superlink-state.rst:21 msgid "" "In the example below, we create a new directory called ``state``, change " "the user ID and tell Docker via the flag ``--volume`` to mount the local " @@ -2969,7 +2978,7 @@ msgid "" "database file." 
msgstr "" -#: ../../source/docker/persist-superlink-state.rst:35 +#: ../../source/docker/persist-superlink-state.rst:36 msgid "" "As soon as the SuperLink starts, the file ``state.db`` is created in the " "``state`` directory on your host system. If the file already exists, the " @@ -2994,17 +3003,17 @@ msgid "" "by-digest-immutable-identifier>`_ of the image instead of the tag." msgstr "" -#: ../../source/docker/pin-version.rst:13 +#: ../../source/docker/pin-version.rst:14 msgid "" "The following command returns the current image digest referenced by the " ":substitution-code:`superlink:|stable_flwr_version|` tag:" msgstr "" -#: ../../source/docker/pin-version.rst:22 +#: ../../source/docker/pin-version.rst:23 msgid "This will output" msgstr "" -#: ../../source/docker/pin-version.rst:29 +#: ../../source/docker/pin-version.rst:30 msgid "Next, we can pin the digest when running a new SuperLink container:" msgstr "" @@ -3051,7 +3060,7 @@ msgid "" "``USER root`` directive within your Dockerfile." msgstr "" -#: ../../source/docker/run-as-root-user.rst:29 +#: ../../source/docker/run-as-root-user.rst:30 #, fuzzy msgid "SuperNode Dockerfile" msgstr "Démarrer le serveur" @@ -3078,12 +3087,12 @@ msgid "" "done by extending the SuperNode image:" msgstr "" -#: ../../source/docker/run-as-subprocess.rst:16 +#: ../../source/docker/run-as-subprocess.rst:17 #, fuzzy msgid "Dockerfile.supernode" msgstr "Serveur de Flower" -#: ../../source/docker/run-as-subprocess.rst:30 +#: ../../source/docker/run-as-subprocess.rst:31 msgid "" "Next, build the SuperNode Docker image by running the following command " "in the directory where Dockerfile is located:" @@ -3114,79 +3123,80 @@ msgid "" " Engine via Docker Compose." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:12 msgid "" "Some quickstart examples may have limitations or requirements that " "prevent them from running on every environment. 
For more information, " -"please see `Limitations`_." +"please see Limitations_." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:17 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:14 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:15 #: ../../source/docker/tutorial-quickstart-docker.rst:13 msgid "Before you start, make sure that:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:19 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:16 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 #: ../../source/docker/tutorial-quickstart-docker.rst:15 msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 #: ../../source/docker/tutorial-quickstart-docker.rst:16 msgid "The Docker daemon is running." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:19 msgid "Docker Compose is `installed `_." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:24 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:25 #, fuzzy msgid "Run the Quickstart Example" msgstr "Demande pour un nouveau Flower Example" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:26 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:27 msgid "" "Clone the quickstart example you like to run. For example, ``quickstart-" "pytorch``:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:34 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:35 msgid "" "Download the `compose.yml " "`_" " file into the example directory:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:41 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:44 #, fuzzy msgid "Build and start the services using the following command:" msgstr "Active la virtualenv en exécutant la commande suivante :" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:47 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:50 #, fuzzy msgid "" "Append the following lines to the end of the ``pyproject.toml`` file and " "save it:" msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:49 -#: ../../source/docker/tutorial-quickstart-docker.rst:319 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:52 +#: ../../source/docker/tutorial-quickstart-docker.rst:324 msgid "pyproject.toml" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:58 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 msgid "" "You can customize the string that follows ``tool.flwr.federations.`` to " "fit your needs. However, please note that the string cannot contain a dot" " (``.``)." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:64 msgid "" "In this example, ``local-deployment`` has been used. Just remember to " "replace ``local-deployment`` with your chosen name in both the " @@ -3194,57 +3204,57 @@ msgid "" "command." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:65 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:68 #, fuzzy msgid "Run the example:" msgstr "Fédérer l'exemple" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:71 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:74 msgid "Follow the logs of the SuperExec service:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:77 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 msgid "" "That is all it takes! You can monitor the progress of the run through the" " logs of the SuperExec." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:84 msgid "Run a Different Quickstart Example" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:82 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:86 msgid "" "To run a different quickstart example, such as ``quickstart-tensorflow``," " first, shut down the Docker Compose services of the current example:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:89 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:93 msgid "After that, you can repeat the steps above." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:92 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:98 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:96 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 #, fuzzy msgid "Limitations" msgstr "Simulation de moniteur" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:97 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 #, fuzzy msgid "Quickstart Example" msgstr "Démarrage rapide de JAX" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:99 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 #, fuzzy msgid "quickstart-fastai" msgstr "Démarrage rapide fastai" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:100 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:123 #: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 #: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 #: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 @@ -3252,19 +3262,19 @@ msgstr "Démarrage rapide fastai" msgid "None" msgstr "Aucun" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 +#: 
../../source/docker/run-quickstart-examples-docker-compose.rst:105 #, fuzzy msgid "quickstart-huggingface" msgstr "Quickstart tutorials" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 #, fuzzy msgid "quickstart-jax" msgstr "Démarrage rapide de JAX" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:125 #, fuzzy msgid "" "The example has not yet been updated to work with the latest ``flwr`` " @@ -3273,64 +3283,64 @@ msgstr "" "Les exemples de code couvrant scikit-learn et PyTorch Lightning ont été " "mis à jour pour fonctionner avec la dernière version de Flower." -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 #, fuzzy msgid "quickstart-mlcube" msgstr "Démarrage rapide de JAX" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 #, fuzzy msgid "quickstart-mlx" msgstr "Démarrage rapide de JAX" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 msgid "" "`Requires to run on macOS with Apple Silicon `_." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 #, fuzzy msgid "quickstart-monai" msgstr "Démarrage rapide de JAX" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 #, fuzzy msgid "quickstart-pandas" msgstr "Démarrage rapide des Pandas" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:113 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 #, fuzzy msgid "quickstart-pytorch-lightning" msgstr "Démarrage rapide de PyTorch Lightning" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 msgid "" "Requires an older pip version that is not supported by the Flower Docker " "images." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 #, fuzzy msgid "quickstart-pytorch" msgstr "Démarrage rapide de PyTorch" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 #, fuzzy msgid "quickstart-sklearn-tabular" msgstr "Démarrage rapide de scikit-learn" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:124 #, fuzzy msgid "quickstart-tabnet" msgstr "Démarrage rapide de JAX" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:126 #, fuzzy msgid "quickstart-tensorflow" msgstr "Démarrage rapide de TensorFlow" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:127 msgid "Only runs on AMD64." 
msgstr "" @@ -3346,6 +3356,209 @@ msgid "" "environment variables for a container." msgstr "" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:2 +#, fuzzy +msgid "Deploy Flower on Multiple Machines with Docker Compose" +msgstr "Démarrage rapide XGBoost" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:4 +msgid "" +"This guide will help you set up a Flower project on multiple machines " +"using Docker Compose." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:7 +msgid "" +"You will learn how to run the Flower client and server components on two " +"separate machines, with Flower configured to use TLS encryption and " +"persist SuperLink state across restarts. A server consists of a SuperLink" +" and ``SuperExec``. For more details about the Flower architecture, refer" +" to the :doc:`../explanation-flower-architecture` explainer page." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:13 +msgid "" +"This guide assumes you have completed the :doc:`tutorial-quickstart-" +"docker-compose` tutorial. It is highly recommended that you follow and " +"understand the contents of that tutorial before proceeding with this " +"guide." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:20 +msgid "Before you begin, make sure you have the following prerequisites:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:23 +msgid "The Docker daemon is running on your local machine and the remote machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:24 +msgid "" +"Docker Compose V2 is installed on both your local machine and the remote " +"machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:25 +msgid "You can connect to the remote machine from your local machine." 
+msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:26 +msgid "Ports ``9091`` and ``9093`` are accessible on the remote machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:30 +msgid "" +"The guide uses the |quickstart_sklearn_tabular|_ example as an example " +"project." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:32 +msgid "" +"If your project has a different name or location, please remember to " +"adjust the commands/paths accordingly." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:36 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker.rst:19 +msgid "Step 1: Set Up" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:38 +msgid "Clone the Flower repository and change to the ``distributed`` directory:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:45 +msgid "Get the IP address from the remote machine and save it for later." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:46 +msgid "" +"Use the ``certs.yml`` Compose file to generate your own self-signed " +"certificates. If you have certificates, you can continue with Step 2." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:51 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:221 +msgid "These certificates should be used only for development purposes." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:53 +msgid "" +"For production environments, you may have to use dedicated services to " +"obtain your certificates." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:56 +msgid "" +"First, set the environment variables ``SUPERLINK_IP`` and " +"``SUPEREXEC_IP`` with the IP address from the remote machine. 
For " +"example, if the IP is ``192.168.2.33``, execute:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:65 +msgid "Next, generate the self-signed certificates:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:72 +msgid "Step 2: Copy the Server Compose Files" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:74 +msgid "" +"Use the method that works best for you to copy the ``server`` directory, " +"the certificates, and your Flower project to the remote machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:77 +msgid "For example, you can use ``scp`` to copy the directories:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:87 +#, fuzzy +msgid "Step 3: Start the Flower Server Components" +msgstr "Démarrer le serveur" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:89 +msgid "" +"Log into the remote machine using ``ssh`` and run the following command " +"to start the SuperLink and SuperExec services:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:102 +msgid "" +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``server`` Docker Compose files." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:105 +msgid "Go back to your terminal on your local machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:108 +#, fuzzy +msgid "Step 4: Start the Flower Client Components" +msgstr "Démarrer le serveur" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:110 +msgid "" +"On your local machine, run the following command to start the client " +"components:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:120 +msgid "" +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``client`` Docker Compose files." 
+msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:124 +#, fuzzy +msgid "Step 5: Run Your Flower Project" +msgstr "Serveur de Flower" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:126 +msgid "" +"Specify the remote SuperExec IP addresses and the path to the root " +"certificate in the ``[tool.flwr.federations.remote-superexec]`` table in " +"the ``pyproject.toml`` file. Here, we have named our remote federation " +"``remote-superexec``:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:130 +#, fuzzy +msgid "examples/quickstart-sklearn-tabular/pyproject.toml" +msgstr "Démarrage rapide de scikit-learn" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:139 +msgid "" +"The Path of the ``root-certificates`` should be relative to the location " +"of the ``pyproject.toml`` file." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:142 +msgid "To run the project, execute:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:148 +msgid "" +"That's it! With these steps, you've set up Flower on two separate " +"machines and are ready to start using it." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:152 +msgid "Step 6: Clean Up" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:154 +#, fuzzy +msgid "Shut down the Flower client components:" +msgstr "Client de Flower" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:161 +msgid "Shut down the Flower server components and delete the SuperLink state:" +msgstr "" + #: ../../source/docker/tutorial-quickstart-docker.rst:2 #, fuzzy msgid "Quickstart with Docker" @@ -3364,12 +3577,7 @@ msgid "" " understanding the basic workflow that uses the minimum configurations." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:21 -#: ../../source/docker/tutorial-quickstart-docker.rst:19 -msgid "Step 1: Set Up" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:31 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:32 #: ../../source/docker/tutorial-quickstart-docker.rst:21 msgid "Create a new Flower project (PyTorch):" msgstr "" @@ -3391,7 +3599,7 @@ msgstr "" msgid "Step 2: Start the SuperLink" msgstr "Démarrer le serveur" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:60 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:62 #: ../../source/docker/tutorial-quickstart-docker.rst:52 #, fuzzy msgid "Open your terminal and run:" @@ -3418,8 +3626,8 @@ msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst:71 #: ../../source/docker/tutorial-quickstart-docker.rst:108 -#: ../../source/docker/tutorial-quickstart-docker.rst:215 -#: ../../source/docker/tutorial-quickstart-docker.rst:304 +#: ../../source/docker/tutorial-quickstart-docker.rst:219 +#: ../../source/docker/tutorial-quickstart-docker.rst:309 msgid "" "``--network flwr-network``: Make the container join the network named " "``flwr-network``." @@ -3431,8 +3639,8 @@ msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst:73 #: ../../source/docker/tutorial-quickstart-docker.rst:110 -#: ../../source/docker/tutorial-quickstart-docker.rst:216 -#: ../../source/docker/tutorial-quickstart-docker.rst:306 +#: ../../source/docker/tutorial-quickstart-docker.rst:220 +#: ../../source/docker/tutorial-quickstart-docker.rst:311 msgid "" "``--detach``: Run the container in the background, freeing up the " "terminal." @@ -3550,13 +3758,13 @@ msgid "" "extends the ClientApp image and installs the required dependencies." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:148 +#: ../../source/docker/tutorial-quickstart-docker.rst:149 msgid "" "Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste " "the following code into it:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:150 +#: ../../source/docker/tutorial-quickstart-docker.rst:152 #, fuzzy msgid "Dockerfile.clientapp" msgstr "Flower ClientApp." @@ -3639,7 +3847,7 @@ msgstr "" msgid "the default command run when the container is started." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:184 +#: ../../source/docker/tutorial-quickstart-docker.rst:186 msgid "" "Note that `flwr `__ is already installed " "in the ``flwr/clientapp`` base image, so only other package dependencies " @@ -3648,20 +3856,20 @@ msgid "" "after it has been copied into the Docker image (see line 5)." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:189 +#: ../../source/docker/tutorial-quickstart-docker.rst:192 msgid "" "Next, build the ClientApp Docker image by running the following command " "in the directory where the Dockerfile is located:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:198 +#: ../../source/docker/tutorial-quickstart-docker.rst:201 msgid "" "The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. " "Remember that these values are merely examples, and you can customize " "them according to your requirements." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:201 +#: ../../source/docker/tutorial-quickstart-docker.rst:205 #, fuzzy msgid "Start the first ClientApp container:" msgstr "Utilisation du moteur du client virtuel" @@ -3682,34 +3890,34 @@ msgstr "" msgid "``supernode-1:9094``." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:222 +#: ../../source/docker/tutorial-quickstart-docker.rst:226 msgid "Start the second ClientApp container:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:233 +#: ../../source/docker/tutorial-quickstart-docker.rst:237 #, fuzzy msgid "Step 5: Start the SuperExec" msgstr "Démarrer le serveur" -#: ../../source/docker/tutorial-quickstart-docker.rst:235 +#: ../../source/docker/tutorial-quickstart-docker.rst:239 msgid "" "The procedure for building and running a SuperExec image is almost " "identical to the ClientApp image." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:237 +#: ../../source/docker/tutorial-quickstart-docker.rst:242 msgid "" "Similar to the ClientApp image, you will need to create a Dockerfile that" " extends the SuperExec image and installs the required FAB dependencies." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:240 +#: ../../source/docker/tutorial-quickstart-docker.rst:245 msgid "" "Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste " "the following code in:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:242 +#: ../../source/docker/tutorial-quickstart-docker.rst:248 msgid "Dockerfile.superexec" msgstr "" @@ -3739,13 +3947,13 @@ msgstr "" msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:277 +#: ../../source/docker/tutorial-quickstart-docker.rst:283 msgid "" "Afterward, in the directory that holds the Dockerfile, execute this " "Docker command to build the SuperExec image:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:285 +#: ../../source/docker/tutorial-quickstart-docker.rst:290 #, fuzzy msgid "Start the SuperExec container:" msgstr "Démarrer le serveur" @@ -3760,7 +3968,7 @@ msgid "" "``http://localhost:9093``." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:305 +#: ../../source/docker/tutorial-quickstart-docker.rst:310 msgid "``--name superexec``: Assign the name ``superexec`` to the container." msgstr "" @@ -3780,79 +3988,79 @@ msgstr "" msgid "connect to the SuperLink running on port ``9091``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:315 +#: ../../source/docker/tutorial-quickstart-docker.rst:320 msgid "Step 6: Run the Quickstart Project" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:317 +#: ../../source/docker/tutorial-quickstart-docker.rst:322 #, fuzzy msgid "Add the following lines to the ``pyproject.toml``:" msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." -#: ../../source/docker/tutorial-quickstart-docker.rst:326 +#: ../../source/docker/tutorial-quickstart-docker.rst:331 msgid "Run the ``quickstart-docker`` project by executing the command:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:332 +#: ../../source/docker/tutorial-quickstart-docker.rst:337 msgid "Follow the SuperExec logs to track the execution of the run:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:339 +#: ../../source/docker/tutorial-quickstart-docker.rst:344 #, fuzzy msgid "Step 7: Update the Application" msgstr "Étape 3 : Sérialisation personnalisée" -#: ../../source/docker/tutorial-quickstart-docker.rst:341 +#: ../../source/docker/tutorial-quickstart-docker.rst:346 msgid "" -"Change the application code. For example, change the ``seed`` in " +"Change the application code. 
For example, change the ``seed`` in " "``quickstart_docker/task.py`` to ``43`` and save it:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:344 +#: ../../source/docker/tutorial-quickstart-docker.rst:349 #, fuzzy msgid "quickstart_docker/task.py" msgstr "Démarrage rapide des Pandas" -#: ../../source/docker/tutorial-quickstart-docker.rst:351 +#: ../../source/docker/tutorial-quickstart-docker.rst:356 msgid "Stop the current ClientApp containers:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:357 +#: ../../source/docker/tutorial-quickstart-docker.rst:362 #, fuzzy msgid "Rebuild the FAB and ClientApp image:" msgstr "Chargement des données" -#: ../../source/docker/tutorial-quickstart-docker.rst:363 +#: ../../source/docker/tutorial-quickstart-docker.rst:368 msgid "Launch two new ClientApp containers based on the newly built image:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:378 +#: ../../source/docker/tutorial-quickstart-docker.rst:383 msgid "Run the updated project:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:385 +#: ../../source/docker/tutorial-quickstart-docker.rst:390 msgid "Step 8: Clean Up" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:387 +#: ../../source/docker/tutorial-quickstart-docker.rst:392 msgid "Remove the containers and the bridge network:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:401 -#: ../../source/docker/tutorial-quickstart-docker.rst:399 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:408 +#: ../../source/docker/tutorial-quickstart-docker.rst:404 #, fuzzy msgid "Where to Go Next" msgstr "Par où commencer" -#: ../../source/docker/tutorial-quickstart-docker.rst:401 +#: ../../source/docker/tutorial-quickstart-docker.rst:406 msgid ":doc:`enable-tls`" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:402 +#: ../../source/docker/tutorial-quickstart-docker.rst:407 msgid 
":doc:`persist-superlink-state`" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:403 +#: ../../source/docker/tutorial-quickstart-docker.rst:408 msgid ":doc:`tutorial-quickstart-docker-compose`" msgstr "" @@ -3875,179 +4083,179 @@ msgid "" "configuration that best suits your project's needs." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:23 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:24 msgid "Clone the Docker Compose ``complete`` directory:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:37 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:38 msgid "" "Export the path of the newly created project. The path should be relative" " to the location of the Docker Compose files:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:44 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:45 msgid "" "Setting the ``PROJECT_DIR`` helps Docker Compose locate the " "``pyproject.toml`` file, allowing it to install dependencies in the " "SuperExec and SuperNode images correctly." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:48 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:49 #, fuzzy msgid "Step 2: Run Flower in Insecure Mode" msgstr "Serveur de Flower" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:50 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:51 msgid "" "To begin, start Flower with the most basic configuration. In this setup, " "Flower will run without TLS and without persisting the state." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:55 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:56 msgid "" "Without TLS, the data sent between the services remains **unencrypted**. " "Use it only for development purposes." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:58 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:59 msgid "" "For production-oriented use cases, :ref:`enable TLS` for secure data" " transmission." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:68 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:179 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:184 msgid "``docker compose``: The Docker command to run the Docker Compose tool." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:69 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:180 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 msgid "" "``-f compose.yml``: Specify the YAML file that contains the basic Flower " "service definitions." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:72 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:190 msgid "" "``--build``: Rebuild the images for each service if they don't already " "exist." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:186 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:73 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:191 msgid "" "``-d``: Detach the containers from the terminal and run them in the " "background." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:74 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 msgid "Step 3: Run the Quickstart Project" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:78 msgid "" "Now that the Flower services have been started via Docker Compose, it is " "time to run the quickstart example." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:79 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:81 msgid "" "To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify" " the SuperExec addresses in the ``pyproject.toml`` file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:82 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:232 msgid "Add the following lines to the ``quickstart-compose/pyproject.toml``:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:228 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:86 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:234 msgid "quickstart-compose/pyproject.toml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:91 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:93 msgid "Execute the command to run the quickstart example:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:97 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:99 msgid "Monitor the SuperExec logs and wait for the summary to appear:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:104 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 #, fuzzy msgid "Step 4: Update 
the Application" msgstr "Étape 3 : Sérialisation personnalisée" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 msgid "In the next step, change the application code." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:110 msgid "" "For example, go to the ``task.py`` file in the ``quickstart-" "compose/quickstart_compose/`` directory and add a ``print`` call in the " "``get_weights`` function:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:111 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:114 msgid "quickstart-compose/quickstart_compose/task.py" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:120 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:125 #, fuzzy msgid "Rebuild and restart the services." msgstr "Nous pouvons déjà démarrer le *serveur* :" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:124 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 msgid "" "If you have modified the dependencies listed in your ``pyproject.toml`` " "file, it is essential to rebuild images." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:127 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:132 msgid "If you haven't made any changes, you can skip this step." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:134 msgid "Run the following command to rebuild and restart the services:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:135 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:140 msgid "Run the updated quickstart example:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:142 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:147 msgid "In the SuperExec logs, you should find the ``Get weights`` line:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:159 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:164 msgid "Step 5: Persisting the SuperLink State" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:161 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 msgid "" "In this step, Flower services are configured to persist the state of the " "SuperLink service, ensuring that it maintains its state even after a " "restart." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 msgid "" "When working with Docker Compose on Linux, you may need to create the " "``state`` directory first and change its ownership to ensure proper " "access and permissions." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:169 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:174 msgid "" "For more information, consult the following page: :doc:`persist-" "superlink-state`." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:220 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:176 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 msgid "Run the command:" msgstr "" @@ -4068,17 +4276,17 @@ msgid "" "rules>`_." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:188 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:193 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:247 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:375 msgid "Rerun the ``quickstart-compose`` project:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:194 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:199 msgid "Check the content of the ``state`` directory:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:201 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:206 msgid "" "You should see a ``state.db`` file in the ``state`` directory. If you " "restart the service, the state file will be used to restore the state " @@ -4086,124 +4294,120 @@ msgid "" "if the containers are stopped and started again." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:208 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:214 msgid "Step 6: Run Flower with TLS" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:210 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:216 msgid "" "To demonstrate how to enable TLS, generate self-signed certificates using" " the ``certs.yml`` Compose file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:215 -msgid "These certificates should be used only for development purposes." 
-msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:217 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:223 msgid "" "For production environments, use a service like `Let's Encrypt " "`_ to obtain your certificates." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:235 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 msgid "Restart the services with TLS enabled:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:249 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:255 msgid "Step 7: Add another SuperNode" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:251 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 msgid "" "You can add more SuperNodes and ClientApps by duplicating their " "definitions in the ``compose.yml`` file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:254 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:260 msgid "" "Just give each new SuperNode and ClientApp service a unique service name " "like ``supernode-3``, ``clientapp-3``, etc." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:263 msgid "In ``compose.yml``, add the following:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:259 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:265 msgid "compose.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:310 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:316 msgid "" "If you also want to enable TLS for the new SuperNodes, duplicate the " "SuperNode definition for each new SuperNode service in the ``with-" "tls.yml`` file." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:313 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:319 msgid "" "Make sure that the names of the services match with the one in the " "``compose.yml`` file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:315 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:321 msgid "In ``with-tls.yml``, add the following:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:317 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:323 msgid "with-tls.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:339 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:345 msgid "Step 8: Persisting the SuperLink State and Enabling TLS" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:341 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:347 msgid "" "To run Flower with persisted SuperLink state and enabled TLS, a slight " "change in the ``with-state.yml`` file is required:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:344 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:350 msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:346 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:352 msgid "with-state.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:363 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 #, fuzzy msgid "Restart the services:" msgstr "Démarrer le serveur" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:377 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:383 msgid "Step 9: Merge Multiple Compose Files" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:379 +#: 
../../source/docker/tutorial-quickstart-docker-compose.rst:385 msgid "" "You can merge multiple Compose files into a single file. For instance, if" " you wish to combine the basic configuration with the TLS configuration, " "execute the following command:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:387 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:394 msgid "" "This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" " a new file called ``my_compose.yml``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:391 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:398 msgid "Step 10: Clean Up" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:393 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:400 msgid "Remove all services and volumes:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:403 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:410 #, fuzzy msgid ":doc:`run-quickstart-examples-docker-compose`" msgstr "Démarrage rapide XGBoost" @@ -4219,7 +4423,7 @@ msgid "" " `Docker Hub `__." msgstr "" -#: ../../source/docker/use-a-different-version.rst:9 +#: ../../source/docker/use-a-different-version.rst:10 msgid "" "When using Flower nightly, the SuperLink nightly image must be paired " "with the corresponding SuperNode and ServerApp nightly images released on" @@ -4254,18 +4458,18 @@ msgstr "" "fédération `_." 
-#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:10 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:12 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 msgid "Centralized Training" msgstr "Formation centralisée" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:14 #, fuzzy msgid "" "All files are revised based on :doc:`Example: PyTorch - From Centralized " "To Federated `. The only " -"thing to do is modifying the file called :code:`cifar.py`, revised part " -"is shown below:" +"thing to do is modifying the file called ``cifar.py``, revised part is " +"shown below:" msgstr "" "Tous les fichiers sont révisés sur la base de `Exemple : PyTorch - From " "Centralized To Federated `, the following parts are" -" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " -"function in :code:`client.py` needed to revise. If not, please read the " -":doc:`Example: PyTorch - From Centralized To Federated `. first." +" easy to follow, only ``get_parameters`` and ``set_parameters`` function " +"in ``client.py`` needed to revise. If not, please read the :doc:`Example:" +" PyTorch - From Centralized To Federated `. first." msgstr "" "Si vous avez lu `Exemple : PyTorch - From Centralized To Federated " "`. d'abord." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:66 +#, fuzzy msgid "" "Our example consists of one *server* and two *clients*. In FedBN, " -":code:`server.py` keeps unchanged, we can start the server directly." +"``server.py`` keeps unchanged, we can start the server directly." msgstr "" "Notre exemple consiste en un *serveur* et deux *clients*. 
Dans FedBN, " ":code:`server.py` reste inchangé, nous pouvons démarrer le serveur " "directement." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:73 +#, fuzzy msgid "" -"Finally, we will revise our *client* logic by changing " -":code:`get_parameters` and :code:`set_parameters` in :code:`client.py`, " -"we will exclude batch normalization parameters from model parameter list " -"when sending to or receiving from the server." +"Finally, we will revise our *client* logic by changing ``get_parameters``" +" and ``set_parameters`` in ``client.py``, we will exclude batch " +"normalization parameters from model parameter list when sending to or " +"receiving from the server." msgstr "" "Enfin, nous allons réviser notre logique *client* en modifiant " ":code:`get_parameters` et :code:`set_parameters` dans :code:`client.py`, " @@ -4347,11 +4553,11 @@ msgstr "" "des paramètres du modèle lors de l'envoi ou de la réception depuis le " "serveur." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:102 msgid "Now, you can now open two additional terminal windows and run" msgstr "Tu peux maintenant ouvrir deux autres fenêtres de terminal et lancer" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:108 msgid "" "in each window (make sure that the server is still running before you do " "so) and see your (previously centralized) PyTorch project run federated " @@ -4362,13 +4568,13 @@ msgstr "" "(auparavant centralisé) exécuter l'apprentissage fédéré avec la stratégie" " FedBN sur deux clients. Félicitations !" 
-#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:310 -#: ../../source/tutorial-quickstart-jax.rst:283 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:113 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:349 +#: ../../source/tutorial-quickstart-jax.rst:319 msgid "Next Steps" msgstr "Prochaines étapes" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:115 #, fuzzy msgid "" "The full source code for this example can be found `here " @@ -4414,7 +4620,7 @@ msgstr "" "Ensuite, nous nous appuyons sur le code d'entraînement centralisé pour " "exécuter l'entraînement de manière fédérée." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:14 msgid "" "We begin with a brief description of the centralized CNN training code. " "If you want a more in-depth explanation of what's going on then have a " @@ -4426,14 +4632,15 @@ msgstr "" "passe, jette un coup d'œil au tutoriel officiel `PyTorch " "`_." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:15 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:18 +#, fuzzy msgid "" -"Let's create a new file called :code:`cifar.py` with all the components " +"Let's create a new file called ``cifar.py`` with all the components " "required for a traditional (centralized) training on CIFAR-10. First, all" -" required packages (such as :code:`torch` and :code:`torchvision`) need " -"to be imported. You can see that we do not import any package for " -"federated learning. You can keep all these imports as they are even when " -"we add the federated learning components at a later point." +" required packages (such as ``torch`` and ``torchvision``) need to be " +"imported. 
You can see that we do not import any package for federated " +"learning. You can keep all these imports as they are even when we add the" +" federated learning components at a later point." msgstr "" "Créons un nouveau fichier appelé :code:`cifar.py` avec tous les " "composants requis pour une formation traditionnelle (centralisée) sur le " @@ -4443,30 +4650,33 @@ msgstr "" "toutes ces importations telles quelles même lorsque nous ajouterons les " "composants d'apprentissage fédéré à un moment ultérieur." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:32 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:36 +#, fuzzy msgid "" "As already mentioned we will use the CIFAR-10 dataset for this machine " "learning workload. The model architecture (a very simple Convolutional " -"Neural Network) is defined in :code:`class Net()`." +"Neural Network) is defined in ``class Net()``." msgstr "" "Comme nous l'avons déjà mentionné, nous utiliserons l'ensemble de données" " CIFAR-10 pour cette charge de travail d'apprentissage automatique. " "L'architecture du modèle (un réseau neuronal convolutif très simple) est " "définie dans :code:`class Net()`." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:62 +#, fuzzy msgid "" -"The :code:`load_data()` function loads the CIFAR-10 training and test " -"sets. The :code:`transform` normalized the data after loading." +"The ``load_data()`` function loads the CIFAR-10 training and test sets. " +"The ``transform`` normalized the data after loading." msgstr "" "La fonction :code:`load_data()` charge les ensembles d'entraînement et de" " test CIFAR-10. La fonction :code:`transform` normalise les données après" " leur chargement." 
-#: ../../source/example-pytorch-from-centralized-to-federated.rst:74 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:84 +#, fuzzy msgid "" -"We now need to define the training (function :code:`train()`) which loops" -" over the training set, measures the loss, backpropagates it, and then " +"We now need to define the training (function ``train()``) which loops " +"over the training set, measures the loss, backpropagates it, and then " "takes one optimizer step for each batch of training examples." msgstr "" "Nous devons maintenant définir la formation (fonction :code:`train()`) " @@ -4474,17 +4684,18 @@ msgstr "" "rétropropage, puis effectue une étape d'optimisation pour chaque lot " "d'exemples de formation." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:76 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:88 +#, fuzzy msgid "" -"The evaluation of the model is defined in the function :code:`test()`. " -"The function loops over all test samples and measures the loss of the " -"model based on the test dataset." +"The evaluation of the model is defined in the function ``test()``. The " +"function loops over all test samples and measures the loss of the model " +"based on the test dataset." msgstr "" "L'évaluation du modèle est définie dans la fonction :code:`test()`. La " "fonction boucle sur tous les échantillons de test et mesure la perte du " "modèle en fonction de l'ensemble des données de test." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:136 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:149 msgid "" "Having defined the data loading, model architecture, training, and " "evaluation we can put everything together and train our CNN on CIFAR-10." @@ -4493,7 +4704,7 @@ msgstr "" "la formation et l'évaluation, nous pouvons tout mettre ensemble et former" " notre CNN sur CIFAR-10." 
-#: ../../source/example-pytorch-from-centralized-to-federated.rst:163 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:177 msgid "" "So far, this should all look fairly familiar if you've used PyTorch " "before. Let's take the next step and use what we've built to create a " @@ -4505,7 +4716,7 @@ msgstr "" " avons construit pour créer un simple système d'apprentissage fédéré " "composé d'un serveur et de deux clients." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:169 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:184 msgid "" "The simple machine learning project discussed in the previous section " "trains the model on a single dataset (CIFAR-10), we call this centralized" @@ -4525,7 +4736,7 @@ msgstr "" "changer la plupart de ton code et tout mettre en place à partir de zéro, " "ce qui peut représenter un effort considérable." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:173 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 msgid "" "However, with Flower you can evolve your pre-existing code into a " "federated learning setup without the need for a major rewrite." @@ -4534,11 +4745,12 @@ msgstr "" "une configuration d'apprentissage fédéré sans avoir besoin d'une " "réécriture majeure." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:175 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:194 +#, fuzzy msgid "" "The concept is easy to understand. We have to start a *server* and then " -"use the code in :code:`cifar.py` for the *clients* that are connected to " -"the *server*. The *server* sends model parameters to the clients. The " +"use the code in ``cifar.py`` for the *clients* that are connected to the " +"*server*. The *server* sends model parameters to the clients. The " "*clients* run the training and update the parameters. 
The updated " "parameters are sent back to the *server* which averages all received " "parameter updates. This describes one round of the federated learning " @@ -4553,13 +4765,14 @@ msgstr "" "un tour du processus d'apprentissage fédéré et nous répétons cette " "opération pour plusieurs tours." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:181 -#: ../../source/tutorial-quickstart-jax.rst:129 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:201 +#: ../../source/tutorial-quickstart-jax.rst:147 +#, fuzzy msgid "" "Our example consists of one *server* and two *clients*. Let's set up " -":code:`server.py` first. The *server* needs to import the Flower package " -":code:`flwr`. Next, we use the :code:`start_server` function to start a " -"server and tell it to perform three rounds of federated learning." +"``server.py`` first. The *server* needs to import the Flower package " +"``flwr``. Next, we use the ``start_server`` function to start a server " +"and tell it to perform three rounds of federated learning." msgstr "" "Notre exemple consiste en un *serveur* et deux *clients*. Commençons par " "configurer :code:`server.py`. Le *serveur* doit importer le paquet Flower" @@ -4567,35 +4780,36 @@ msgstr "" "pour démarrer un serveur et lui demander d'effectuer trois cycles " "d'apprentissage fédéré." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 -#: ../../source/tutorial-quickstart-jax.rst:139 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:215 +#: ../../source/tutorial-quickstart-jax.rst:161 msgid "We can already start the *server*:" msgstr "Nous pouvons déjà démarrer le *serveur* :" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:197 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 +#, fuzzy msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined centralized training in :code:`cifar.py`. 
" -"Our *client* needs to import :code:`flwr`, but also :code:`torch` to " -"update the parameters on our PyTorch model:" +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined centralized training in ``cifar.py``. Our " +"*client* needs to import ``flwr``, but also ``torch`` to update the " +"parameters on our PyTorch model:" msgstr "" "Enfin, nous allons définir notre logique *client* dans :code:`client.py` " "et nous appuyer sur la formation centralisée définie précédemment dans " ":code:`cifar.py`. Notre *client* doit importer :code:`flwr`, mais aussi " ":code:`torch` pour mettre à jour les paramètres de notre modèle PyTorch :" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:213 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:238 +#, fuzzy msgid "" "Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" -" to implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " -"some of the boilerplate that would otherwise be necessary. " -":code:`CifarClient` needs to implement four methods, two methods for " -"getting/setting model parameters, one method for training the model, and " -"one method for testing the model:" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``CifarClient``. ``NumPyClient`` is slightly easier to implement " +"than ``Client`` if you use a framework with good NumPy interoperability " +"(like PyTorch or TensorFlow/Keras) because it avoids some of the " +"boilerplate that would otherwise be necessary. 
``CifarClient`` needs to " +"implement four methods, two methods for getting/setting model parameters," +" one method for training the model, and one method for testing the model:" msgstr "" "Implementing a Flower *client* basically means implementing a subclass of" " either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " @@ -4608,52 +4822,56 @@ msgstr "" "getting/setting model parameters, one method for training the model, and " "one method for testing the model:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -msgid ":code:`set_parameters`" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#, fuzzy +msgid "``set_parameters``" msgstr ":code:`set_parameters`" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -#: ../../source/tutorial-quickstart-jax.rst:166 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:248 +#: ../../source/tutorial-quickstart-jax.rst:192 msgid "" "set the model parameters on the local model that are received from the " "server" msgstr "règle les paramètres du modèle local reçus du serveur" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:220 -#: ../../source/tutorial-quickstart-jax.rst:168 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#: ../../source/tutorial-quickstart-jax.rst:194 +#, fuzzy msgid "" -"loop over the list of model parameters received as NumPy " -":code:`ndarray`'s (think list of neural network layers)" +"loop over the list of model parameters received as NumPy ``ndarray``'s " +"(think list of neural network layers)" msgstr "" "boucle sur la liste des paramètres du modèle reçus sous forme de NumPy " ":code:`ndarray`'s (pensez à la liste des couches du réseau neuronal)" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 -#: ../../source/tutorial-quickstart-jax.rst:169 -#: ../../source/tutorial-quickstart-scikitlearn.rst:118 -msgid ":code:`get_parameters`" +#: 
../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 +#: ../../source/tutorial-quickstart-scikitlearn.rst:129 +#, fuzzy +msgid "``get_parameters``" msgstr ":code:`get_parameters`" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:222 -#: ../../source/tutorial-quickstart-jax.rst:170 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 +#, fuzzy msgid "" -"get the model parameters and return them as a list of NumPy " -":code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" +"get the model parameters and return them as a list of NumPy ``ndarray``'s" +" (which is what ``flwr.client.NumPyClient`` expects)" msgstr "" "récupère les paramètres du modèle et les renvoie sous forme de liste de " ":code:`ndarray` NumPy (ce qui correspond à ce que " ":code:`flwr.client.NumPyClient` attend)" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid ":code:`fit`" -msgstr ":code:`fit`" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 +msgid "``fit``" +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:224 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:228 -#: ../../source/tutorial-quickstart-jax.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:176 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:255 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:260 +#: ../../source/tutorial-quickstart-jax.rst:200 +#: ../../source/tutorial-quickstart-jax.rst:205 msgid "" "update the parameters of the local model with the parameters received " "from the server" @@ -4661,39 +4879,40 @@ msgstr "" 
"mettre à jour les paramètres du modèle local avec les paramètres reçus du" " serveur" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 msgid "train the model on the local training set" msgstr "entraîne le modèle sur l'ensemble d'apprentissage local" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:226 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:258 msgid "get the updated local model weights and return them to the server" msgstr "récupère les poids du modèle local mis à jour et les renvoie au serveur" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -#: ../../source/tutorial-quickstart-jax.rst:178 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid ":code:`evaluate`" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 +#: ../../source/tutorial-quickstart-jax.rst:208 +#: ../../source/tutorial-quickstart-scikitlearn.rst:139 +#, fuzzy +msgid "``evaluate``" msgstr ":code:`évaluer`" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:229 -#: ../../source/tutorial-quickstart-jax.rst:177 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:262 +#: ../../source/tutorial-quickstart-jax.rst:207 msgid "evaluate the updated model on the local test set" msgstr "évaluer le modèle mis à jour sur l'ensemble de test local" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 msgid "return the local loss and accuracy to the server" msgstr "renvoie la perte locale et la précision au serveur" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:232 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:265 +#, fuzzy msgid "" -"The two :code:`NumPyClient` methods 
:code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`test()` previously " -"defined in :code:`cifar.py`. So what we really do here is we tell Flower " -"through our :code:`NumPyClient` subclass which of our already defined " -"functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``test()`` previously defined in ``cifar.py``. " +"So what we really do here is we tell Flower through our ``NumPyClient`` " +"subclass which of our already defined functions to call for training and " +"evaluation. We included type annotations to give you a better " +"understanding of the data types that get passed around." msgstr "" "Les deux méthodes :code:`NumPyClient` :code:`fit` et :code:`evaluate` " "utilisent les fonctions :code:`train()` et :code:`test()` définies " @@ -4704,14 +4923,14 @@ msgstr "" "annotations de type pour te donner une meilleure compréhension des types " "de données qui sont transmis." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:280 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:315 #, fuzzy msgid "" "All that's left to do it to define a function that loads both model and " -"data, creates a :code:`CifarClient`, and starts this client. You load " -"your data and model by using :code:`cifar.py`. Start :code:`CifarClient` " -"with the function :code:`fl.client.start_client()` by pointing it at the " -"same IP address we used in :code:`server.py`:" +"data, creates a ``CifarClient``, and starts this client. You load your " +"data and model by using ``cifar.py``. 
Start ``CifarClient`` with the " +"function ``fl.client.start_client()`` by pointing it at the same IP " +"address we used in ``server.py``:" msgstr "" "Il ne reste plus qu'à définir une fonction qui charge le modèle et les " "données, crée un :code:`CifarClient` et démarre ce client. Tu charges tes" @@ -4720,14 +4939,14 @@ msgstr "" "la faisant pointer sur la même adresse IP que celle que nous avons " "utilisée dans :code:`server.py` :" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:301 -#: ../../source/tutorial-quickstart-jax.rst:274 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:338 +#: ../../source/tutorial-quickstart-jax.rst:309 msgid "And that's it. You can now open two additional terminal windows and run" msgstr "" "Tu peux maintenant ouvrir deux autres fenêtres de terminal et exécuter " "les commandes suivantes" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:307 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:344 msgid "" "in each window (make sure that the server is running before you do so) " "and see your (previously centralized) PyTorch project run federated " @@ -4737,7 +4956,7 @@ msgstr "" "faire) et tu verras ton projet PyTorch (auparavant centralisé) exécuter " "l'apprentissage fédéré sur deux clients. Félicitations !" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:312 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:351 #, fuzzy msgid "" "The full source code for this example: `PyTorch: From Centralized To " @@ -4758,13 +4977,13 @@ msgstr "" "d'autres clients ?" 
#: ../../source/explanation-differential-privacy.rst:2 -#: ../../source/explanation-differential-privacy.rst:11 +#: ../../source/explanation-differential-privacy.rst:14 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 #, fuzzy msgid "Differential Privacy" msgstr "Confidentialité différentielle" -#: ../../source/explanation-differential-privacy.rst:3 +#: ../../source/explanation-differential-privacy.rst:4 msgid "" "The information in datasets like healthcare, financial transactions, user" " preferences, etc., is valuable and has the potential for scientific " @@ -4773,7 +4992,7 @@ msgid "" "privacy." msgstr "" -#: ../../source/explanation-differential-privacy.rst:6 +#: ../../source/explanation-differential-privacy.rst:9 msgid "" "Traditional methods like anonymization alone would not work because of " "attacks like Re-identification and Data Linkage. That's where " @@ -4781,7 +5000,7 @@ msgid "" "data while ensuring the privacy of individuals." msgstr "" -#: ../../source/explanation-differential-privacy.rst:12 +#: ../../source/explanation-differential-privacy.rst:16 msgid "" "Imagine two datasets that are identical except for a single record (for " "instance, Alice's data). Differential Privacy (DP) guarantees that any " @@ -4795,7 +5014,7 @@ msgstr "" msgid "DP Intro" msgstr "" -#: ../../source/explanation-differential-privacy.rst:22 +#: ../../source/explanation-differential-privacy.rst:27 msgid "" "One of the most commonly used mechanisms to achieve DP is adding enough " "noise to the output of the analysis to mask the contribution of each " @@ -4803,12 +5022,12 @@ msgid "" "analysis." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:25 +#: ../../source/explanation-differential-privacy.rst:32 #, fuzzy msgid "Formal Definition" msgstr "Compiler les définitions ProtoBuf" -#: ../../source/explanation-differential-privacy.rst:26 +#: ../../source/explanation-differential-privacy.rst:34 msgid "" "Differential Privacy (DP) provides statistical guarantees against the " "information an adversary can infer through the output of a randomized " @@ -4820,13 +5039,13 @@ msgid "" "record, and for all possible outputs S ⊆ Range(A):" msgstr "" -#: ../../source/explanation-differential-privacy.rst:32 +#: ../../source/explanation-differential-privacy.rst:42 msgid "" "\\small\n" "P[M(D_{1} \\in A)] \\leq e^{\\epsilon} P[M(D_{2} \\in A)] + \\delta" msgstr "" -#: ../../source/explanation-differential-privacy.rst:38 +#: ../../source/explanation-differential-privacy.rst:47 msgid "" "The :math:`\\epsilon` parameter, also known as the privacy budget, is a " "metric of privacy loss. It also controls the privacy-utility trade-off; " @@ -4838,12 +5057,12 @@ msgid "" " change in the output due to the inclusion or removal of a single record." msgstr "" -#: ../../source/explanation-differential-privacy.rst:45 +#: ../../source/explanation-differential-privacy.rst:56 #, fuzzy msgid "Differential Privacy in Machine Learning" msgstr "Confidentialité différentielle" -#: ../../source/explanation-differential-privacy.rst:46 +#: ../../source/explanation-differential-privacy.rst:58 msgid "" "DP can be utilized in machine learning to preserve the privacy of the " "training data. Differentially private machine learning algorithms are " @@ -4858,12 +5077,12 @@ msgid "" "model's output." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:53 +#: ../../source/explanation-differential-privacy.rst:69 #, fuzzy msgid "Differential Privacy in Federated Learning" msgstr "Mise à l'échelle de l'apprentissage fédéré" -#: ../../source/explanation-differential-privacy.rst:54 +#: ../../source/explanation-differential-privacy.rst:71 msgid "" "Federated learning is a data minimization approach that allows multiple " "parties to collaboratively train a model without sharing their raw data. " @@ -4874,13 +5093,13 @@ msgid "" "attacks." msgstr "" -#: ../../source/explanation-differential-privacy.rst:58 +#: ../../source/explanation-differential-privacy.rst:78 msgid "" "DP can play a crucial role in federated learning to provide privacy for " "the clients' data." msgstr "" -#: ../../source/explanation-differential-privacy.rst:60 +#: ../../source/explanation-differential-privacy.rst:81 msgid "" "Depending on the granularity of privacy provision or the location of " "noise addition, different forms of DP exist in federated learning. In " @@ -4889,14 +5108,14 @@ msgid "" " the center) or at the client (also known as the local)." msgstr "" -#: ../../source/explanation-differential-privacy.rst:63 +#: ../../source/explanation-differential-privacy.rst:86 msgid "" "**Central Differential Privacy**: DP is applied by the server and the " "goal is to prevent the aggregated model from leaking information about " "each client's data." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:65 +#: ../../source/explanation-differential-privacy.rst:88 msgid "" "**Local Differential Privacy**: DP is applied on the client side before " "sending any information to the server and the goal is to prevent the " @@ -4905,20 +5124,20 @@ msgid "" msgstr "" #: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:68 -#: ../../source/how-to-use-differential-privacy.rst:11 +#: ../../source/explanation-differential-privacy.rst:93 +#: ../../source/how-to-use-differential-privacy.rst:15 #, fuzzy msgid "Central Differential Privacy" msgstr "Confidentialité différentielle" -#: ../../source/explanation-differential-privacy.rst:69 +#: ../../source/explanation-differential-privacy.rst:95 msgid "" "In this approach, which is also known as user-level DP, the central " "server is responsible for adding noise to the globally aggregated " "parameters. It should be noted that trust in the server is required." msgstr "" -#: ../../source/explanation-differential-privacy.rst:76 +#: ../../source/explanation-differential-privacy.rst:104 msgid "" "While there are various ways to implement central DP in federated " "learning, we concentrate on the algorithms proposed by [2] and [3]. The " @@ -4937,7 +5156,7 @@ msgstr "" msgid "clipping" msgstr "" -#: ../../source/explanation-differential-privacy.rst:89 +#: ../../source/explanation-differential-privacy.rst:120 msgid "" "Afterwards, the Gaussian mechanism is used to add noise in order to " "distort the sum of all clients' updates. The amount of noise is scaled to" @@ -4946,24 +5165,24 @@ msgid "" "noise_scale * S ) / (number of sampled clients)`." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:94 +#: ../../source/explanation-differential-privacy.rst:126 msgid "Clipping" msgstr "" -#: ../../source/explanation-differential-privacy.rst:96 +#: ../../source/explanation-differential-privacy.rst:128 msgid "" "There are two forms of clipping commonly used in Central DP: Fixed " "Clipping and Adaptive Clipping." msgstr "" -#: ../../source/explanation-differential-privacy.rst:98 +#: ../../source/explanation-differential-privacy.rst:131 msgid "" "**Fixed Clipping** : A predefined fix threshold is set for the magnitude " "of clients' updates. Any update exceeding this threshold is clipped back " "to the threshold value." msgstr "" -#: ../../source/explanation-differential-privacy.rst:100 +#: ../../source/explanation-differential-privacy.rst:133 msgid "" "**Adaptive Clipping** : The clipping threshold dynamically adjusts based " "on the observed update distribution [4]. It means that the clipping value" @@ -4971,7 +5190,7 @@ msgid "" "norm distribution." msgstr "" -#: ../../source/explanation-differential-privacy.rst:102 +#: ../../source/explanation-differential-privacy.rst:137 msgid "" "The choice between fixed and adaptive clipping depends on various factors" " such as privacy requirements, data distribution, model complexity, and " @@ -4979,13 +5198,13 @@ msgid "" msgstr "" #: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:105 -#: ../../source/how-to-use-differential-privacy.rst:96 +#: ../../source/explanation-differential-privacy.rst:141 +#: ../../source/how-to-use-differential-privacy.rst:113 #, fuzzy msgid "Local Differential Privacy" msgstr "Confidentialité différentielle" -#: ../../source/explanation-differential-privacy.rst:107 +#: ../../source/explanation-differential-privacy.rst:143 msgid "" "In this approach, each client is responsible for performing DP. 
Local DP " "avoids the need for a fully trusted aggregator, but it should be noted " @@ -4993,11 +5212,11 @@ msgid "" "comparison to central DP." msgstr "" -#: ../../source/explanation-differential-privacy.rst:116 +#: ../../source/explanation-differential-privacy.rst:152 msgid "In this explainer, we focus on two forms of achieving Local DP:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:118 +#: ../../source/explanation-differential-privacy.rst:154 msgid "" "Each client adds noise to the local updates before sending them to the " "server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " @@ -5005,37 +5224,36 @@ msgid "" "with a noise scale of σ where:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:120 +#: ../../source/explanation-differential-privacy.rst:158 msgid "" "\\small\n" "\\frac{∆ \\times \\sqrt{2 \\times " -"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" -"\n" +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}" msgstr "" -#: ../../source/explanation-differential-privacy.rst:125 +#: ../../source/explanation-differential-privacy.rst:163 msgid "" "Each client adds noise to the gradients of the model during the local " "training (DP-SGD). More specifically, in this approach, gradients are " "clipped and an amount of calibrated noise is injected into the gradients." msgstr "" -#: ../../source/explanation-differential-privacy.rst:128 +#: ../../source/explanation-differential-privacy.rst:167 msgid "" "Please note that these two approaches are providing privacy at different " "levels." msgstr "" -#: ../../source/explanation-differential-privacy.rst:131 +#: ../../source/explanation-differential-privacy.rst:169 #, fuzzy msgid "**References:**" msgstr "Référence" -#: ../../source/explanation-differential-privacy.rst:133 +#: ../../source/explanation-differential-privacy.rst:171 msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:135 +#: ../../source/explanation-differential-privacy.rst:173 #, fuzzy msgid "" "[2] McMahan et al. Learning Differentially Private Recurrent Language " @@ -5044,13 +5262,13 @@ msgstr "" "McMahan, H. Brendan, et al. \"Learning differentially private recurrent " "language models\", arXiv preprint arXiv:1710.06963 (2017)." -#: ../../source/explanation-differential-privacy.rst:137 +#: ../../source/explanation-differential-privacy.rst:175 msgid "" "[3] Geyer et al. Differentially Private Federated Learning: A Client " "Level Perspective." msgstr "" -#: ../../source/explanation-differential-privacy.rst:139 +#: ../../source/explanation-differential-privacy.rst:177 #, fuzzy msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." msgstr "" @@ -5094,17 +5312,17 @@ msgstr "" "prendre les paramètres du modèle global actuel comme entrée et renvoyer " "les résultats de l'évaluation :" -#: ../../source/explanation-federated-evaluation.rst:58 +#: ../../source/explanation-federated-evaluation.rst:61 msgid "Custom Strategies" msgstr "Stratégies personnalisées" -#: ../../source/explanation-federated-evaluation.rst:60 +#: ../../source/explanation-federated-evaluation.rst:63 +#, fuzzy msgid "" -"The :code:`Strategy` abstraction provides a method called " -":code:`evaluate` that can directly be used to evaluate the current global" -" model parameters. The current server implementation calls " -":code:`evaluate` after parameter aggregation and before federated " -"evaluation (see next paragraph)." +"The ``Strategy`` abstraction provides a method called ``evaluate`` that " +"can directly be used to evaluate the current global model parameters. The" +" current server implementation calls ``evaluate`` after parameter " +"aggregation and before federated evaluation (see next paragraph)." 
msgstr "" "L'abstraction :code:`Strategy` fournit une méthode appelée " ":code:`evaluate` qui peut être directement utilisée pour évaluer les " @@ -5112,27 +5330,28 @@ msgstr "" "appelle :code:`evaluate` après l'agrégation des paramètres et avant " "l'évaluation fédérée (voir le paragraphe suivant)." -#: ../../source/explanation-federated-evaluation.rst:65 +#: ../../source/explanation-federated-evaluation.rst:69 msgid "Federated Evaluation" msgstr "Évaluation fédérée" -#: ../../source/explanation-federated-evaluation.rst:68 +#: ../../source/explanation-federated-evaluation.rst:72 msgid "Implementing Federated Evaluation" msgstr "Mise en œuvre de l'évaluation fédérée" -#: ../../source/explanation-federated-evaluation.rst:70 +#: ../../source/explanation-federated-evaluation.rst:74 +#, fuzzy msgid "" -"Client-side evaluation happens in the :code:`Client.evaluate` method and " -"can be configured from the server side." +"Client-side evaluation happens in the ``Client.evaluate`` method and can " +"be configured from the server side." msgstr "" "L'évaluation côté client se fait dans la méthode :code:`Client.evaluate` " "et peut être configurée côté serveur." -#: ../../source/explanation-federated-evaluation.rst:101 +#: ../../source/explanation-federated-evaluation.rst:108 msgid "Configuring Federated Evaluation" msgstr "Configuration de l'évaluation fédérée" -#: ../../source/explanation-federated-evaluation.rst:103 +#: ../../source/explanation-federated-evaluation.rst:110 msgid "" "Federated evaluation can be configured from the server side. Built-in " "strategies support the following arguments:" @@ -5140,14 +5359,14 @@ msgstr "" "L'évaluation fédérée peut être configurée du côté du serveur. 
Les " "stratégies intégrées prennent en charge les arguments suivants :" -#: ../../source/explanation-federated-evaluation.rst:105 +#: ../../source/explanation-federated-evaluation.rst:113 +#, fuzzy msgid "" -":code:`fraction_evaluate`: a :code:`float` defining the fraction of " -"clients that will be selected for evaluation. If " -":code:`fraction_evaluate` is set to :code:`0.1` and :code:`100` clients " -"are connected to the server, then :code:`10` will be randomly selected " -"for evaluation. If :code:`fraction_evaluate` is set to :code:`0.0`, " -"federated evaluation will be disabled." +"``fraction_evaluate``: a ``float`` defining the fraction of clients that " +"will be selected for evaluation. If ``fraction_evaluate`` is set to " +"``0.1`` and ``100`` clients are connected to the server, then ``10`` will" +" be randomly selected for evaluation. If ``fraction_evaluate`` is set to " +"``0.0``, federated evaluation will be disabled." msgstr "" ":code:`fraction_evaluate` : un :code:`float` définissant la fraction de " "clients qui sera sélectionnée pour l'évaluation. Si " @@ -5156,27 +5375,27 @@ msgstr "" "aléatoirement pour l'évaluation. Si :code:`fraction_evaluate` est défini " "à :code:`0.0`, l'évaluation fédérée sera désactivée." -#: ../../source/explanation-federated-evaluation.rst:106 +#: ../../source/explanation-federated-evaluation.rst:118 +#, fuzzy msgid "" -":code:`min_evaluate_clients`: an :code:`int`: the minimum number of " -"clients to be selected for evaluation. If :code:`fraction_evaluate` is " -"set to :code:`0.1`, :code:`min_evaluate_clients` is set to 20, and " -":code:`100` clients are connected to the server, then :code:`20` clients " -"will be selected for evaluation." +"``min_evaluate_clients``: an ``int``: the minimum number of clients to be" +" selected for evaluation. 
If ``fraction_evaluate`` is set to ``0.1``, " +"``min_evaluate_clients`` is set to 20, and ``100`` clients are connected " +"to the server, then ``20`` clients will be selected for evaluation." msgstr "" "si :code:`fraction_evaluate` est réglé sur :code:`0.1`, " ":code:`min_evaluate_clients` est réglé sur 20, et que :code:`100` clients" " sont connectés au serveur, alors :code:`20` clients seront sélectionnés " "pour l'évaluation." -#: ../../source/explanation-federated-evaluation.rst:107 +#: ../../source/explanation-federated-evaluation.rst:122 +#, fuzzy msgid "" -":code:`min_available_clients`: an :code:`int` that defines the minimum " -"number of clients which need to be connected to the server before a round" -" of federated evaluation can start. If fewer than " -":code:`min_available_clients` are connected to the server, the server " -"will wait until more clients are connected before it continues to sample " -"clients for evaluation." +"``min_available_clients``: an ``int`` that defines the minimum number of " +"clients which need to be connected to the server before a round of " +"federated evaluation can start. If fewer than ``min_available_clients`` " +"are connected to the server, the server will wait until more clients are " +"connected before it continues to sample clients for evaluation." msgstr "" ":code:`min_available_clients` : un :code:`int` qui définit le nombre " "minimum de clients qui doivent être connectés au serveur avant qu'un " @@ -5185,9 +5404,10 @@ msgstr "" "attendra que d'autres clients soient connectés avant de continuer à " "échantillonner des clients pour l'évaluation." -#: ../../source/explanation-federated-evaluation.rst:108 +#: ../../source/explanation-federated-evaluation.rst:127 +#, fuzzy msgid "" -":code:`on_evaluate_config_fn`: a function that returns a configuration " +"``on_evaluate_config_fn``: a function that returns a configuration " "dictionary which will be sent to the selected clients. 
The function will " "be called during each round and provides a convenient way to customize " "client-side evaluation from the server side, for example, to configure " @@ -5199,25 +5419,25 @@ msgstr "" "l'évaluation côté client depuis le côté serveur, par exemple pour " "configurer le nombre d'étapes de validation effectuées." -#: ../../source/explanation-federated-evaluation.rst:135 +#: ../../source/explanation-federated-evaluation.rst:157 msgid "Evaluating Local Model Updates During Training" msgstr "Évaluer les mises à jour du modèle local pendant la formation" -#: ../../source/explanation-federated-evaluation.rst:137 +#: ../../source/explanation-federated-evaluation.rst:159 +#, fuzzy msgid "" -"Model parameters can also be evaluated during training. " -":code:`Client.fit` can return arbitrary evaluation results as a " -"dictionary:" +"Model parameters can also be evaluated during training. ``Client.fit`` " +"can return arbitrary evaluation results as a dictionary:" msgstr "" "Les paramètres du modèle peuvent également être évalués pendant la " "formation. :code:`Client.fit` peut renvoyer des résultats d'évaluation " "arbitraires sous forme de dictionnaire :" -#: ../../source/explanation-federated-evaluation.rst:177 +#: ../../source/explanation-federated-evaluation.rst:201 msgid "Full Code Example" msgstr "Exemple de code complet" -#: ../../source/explanation-federated-evaluation.rst:179 +#: ../../source/explanation-federated-evaluation.rst:203 #, fuzzy msgid "" "For a full code example that uses both centralized and federated " @@ -5238,46 +5458,46 @@ msgid "" "learning while preserving data privacy." 
msgstr "" -#: ../../source/explanation-flower-architecture.rst:3 +#: ../../source/explanation-flower-architecture.rst:2 msgid "Flower Architecture" msgstr "Architecture florale" -#: ../../source/explanation-flower-architecture.rst:5 +#: ../../source/explanation-flower-architecture.rst:4 msgid "" "This page explains the architecture of deployed Flower federated learning" " system." msgstr "" -#: ../../source/explanation-flower-architecture.rst:8 +#: ../../source/explanation-flower-architecture.rst:6 msgid "" "In federated learning (FL), there is typically one server and a number of" " clients that are connected to the server. This is often called a " "federation." msgstr "" -#: ../../source/explanation-flower-architecture.rst:12 +#: ../../source/explanation-flower-architecture.rst:9 msgid "" "The role of the server is to coordinate the training process. The role of" " each client is to receive tasks from the server, execute those tasks and" " return the results back to the server." msgstr "" -#: ../../source/explanation-flower-architecture.rst:16 +#: ../../source/explanation-flower-architecture.rst:13 msgid "This is sometimes called a hub-and-spoke topology:" msgstr "" -#: ../../source/explanation-flower-architecture.rst:24 +#: ../../source/explanation-flower-architecture.rst:21 #, fuzzy msgid "Hub-and-spoke topology in federated learning" msgstr "Qu'est-ce que l'apprentissage fédéré ?" -#: ../../source/explanation-flower-architecture.rst:24 +#: ../../source/explanation-flower-architecture.rst:21 msgid "" "Hub-and-spoke topology in federated learning (one server, multiple " "clients)." msgstr "" -#: ../../source/explanation-flower-architecture.rst:26 +#: ../../source/explanation-flower-architecture.rst:23 msgid "" "In a real-world deployment, we typically want to run different projects " "on such a federation. Each project could use different hyperparameters, " @@ -5285,7 +5505,7 @@ msgid "" "different machine learning frameworks like PyTorch and TensorFlow." 
msgstr "" -#: ../../source/explanation-flower-architecture.rst:31 +#: ../../source/explanation-flower-architecture.rst:28 msgid "" "This is why, in Flower, both the server side and the client side are " "split into two parts. One part is long-lived and responsible for " @@ -5293,17 +5513,17 @@ msgid "" "executes task-specific code." msgstr "" -#: ../../source/explanation-flower-architecture.rst:36 +#: ../../source/explanation-flower-architecture.rst:32 msgid "A Flower `server` consists of **SuperLink** and ``ServerApp``:" msgstr "" -#: ../../source/explanation-flower-architecture.rst:38 +#: ../../source/explanation-flower-architecture.rst:34 msgid "" "**SuperLink**: a long-running process that forwards task instructions to " "clients (SuperNodes) and receives task results back." msgstr "" -#: ../../source/explanation-flower-architecture.rst:41 +#: ../../source/explanation-flower-architecture.rst:36 msgid "" "``ServerApp``: a short-lived process with project-spcific code that " "customizes all server-side aspects of federated learning systems (client " @@ -5311,18 +5531,18 @@ msgid "" "researchers and AI engineers write when they build Flower apps." msgstr "" -#: ../../source/explanation-flower-architecture.rst:47 +#: ../../source/explanation-flower-architecture.rst:41 msgid "A Flower `client` consists of **SuperNode** and ``ClientApp``:" msgstr "" -#: ../../source/explanation-flower-architecture.rst:49 +#: ../../source/explanation-flower-architecture.rst:43 msgid "" "**SuperNode**: a long-running process that connects to the SuperLink, " "asks for tasks, executes tasks (for example, \"train this model on your " "local data\") and returns task results back to the SuperLink." 
msgstr "" -#: ../../source/explanation-flower-architecture.rst:53 +#: ../../source/explanation-flower-architecture.rst:46 msgid "" "``ClientApp``: a short-lived process with project-specific code that " "customizes all client-side aspects of federated learning systems (local " @@ -5330,7 +5550,7 @@ msgid "" " researchers and AI engineers write when they build Flower apps." msgstr "" -#: ../../source/explanation-flower-architecture.rst:59 +#: ../../source/explanation-flower-architecture.rst:51 msgid "" "Why SuperNode and SuperLink? Well, in federated learning, the clients are" " the actual stars of the show. They hold the training data and they run " @@ -5339,30 +5559,30 @@ msgid "" "`missing link` between all those SuperNodes." msgstr "" -#: ../../source/explanation-flower-architecture.rst:71 +#: ../../source/explanation-flower-architecture.rst:62 #, fuzzy msgid "Basic Flower architecture" msgstr "Architecture florale" -#: ../../source/explanation-flower-architecture.rst:71 +#: ../../source/explanation-flower-architecture.rst:62 #, fuzzy msgid "The basic Flower architecture for federated learning." msgstr "Qu'est-ce que l'apprentissage fédéré ?" -#: ../../source/explanation-flower-architecture.rst:73 +#: ../../source/explanation-flower-architecture.rst:64 msgid "" "In a Flower app project, users will typically develop the ``ServerApp`` " "and the ``ClientApp``. All the network communication between `server` and" " `clients` is taken care of by the SuperLink and SuperNodes." msgstr "" -#: ../../source/explanation-flower-architecture.rst:79 +#: ../../source/explanation-flower-architecture.rst:70 msgid "" "For more details, please refer to the |serverapp_link|_ and " "|clientapp_link|_ documentation." 
msgstr "" -#: ../../source/explanation-flower-architecture.rst:82 +#: ../../source/explanation-flower-architecture.rst:73 msgid "" "With *multi-run*, multiple ``ServerApp``\\s and ``ClientApp``\\s are now " "capable of running on the same federation consisting of a single long-" @@ -5370,24 +5590,24 @@ msgid "" " referred to as `multi-tenancy` or `multi-job`." msgstr "" -#: ../../source/explanation-flower-architecture.rst:87 +#: ../../source/explanation-flower-architecture.rst:78 msgid "" "As shown in the figure below, two projects, each consisting of a " "``ServerApp`` and a ``ClientApp``, could share the same SuperLink and " "SuperNodes." msgstr "" -#: ../../source/explanation-flower-architecture.rst:97 +#: ../../source/explanation-flower-architecture.rst:87 #, fuzzy msgid "Multi-tenancy federated learning architecture" msgstr "Stratégie de moyenne fédérée." -#: ../../source/explanation-flower-architecture.rst:97 +#: ../../source/explanation-flower-architecture.rst:87 #, fuzzy msgid "Multi-tenancy federated learning architecture with Flower" msgstr "Étape 2 : Apprentissage fédéré avec Flower" -#: ../../source/explanation-flower-architecture.rst:99 +#: ../../source/explanation-flower-architecture.rst:89 msgid "" "To illustrate how multi-run works, consider one federated learning " "training run where a ``ServerApp`` and a ``ClientApp`` are participating " @@ -5395,48 +5615,48 @@ msgid "" " is selected to participate in the training run." msgstr "" -#: ../../source/explanation-flower-architecture.rst:104 +#: ../../source/explanation-flower-architecture.rst:94 msgid "" "In ``[run 1]`` below, all the SuperNodes are selected and therefore run " "their corresponding ``ClientApp``\\s:" msgstr "" -#: ../../source/explanation-flower-architecture.rst:113 +#: ../../source/explanation-flower-architecture.rst:103 #, fuzzy msgid "Multi-tenancy federated learning architecture - Run 1" msgstr "Stratégie de moyenne fédérée." 
-#: ../../source/explanation-flower-architecture.rst:113 +#: ../../source/explanation-flower-architecture.rst:103 msgid "" "Run 1 in a multi-run federated learning architecture with Flower. All " "SuperNodes participate in the training round." msgstr "" -#: ../../source/explanation-flower-architecture.rst:116 +#: ../../source/explanation-flower-architecture.rst:106 msgid "" "However, in ``[run 2]``, only the first and third SuperNodes are selected" " to participate in the training:" msgstr "" -#: ../../source/explanation-flower-architecture.rst:125 +#: ../../source/explanation-flower-architecture.rst:115 #, fuzzy msgid "Multi-tenancy federated learning architecture - Run 2" msgstr "Stratégie de moyenne fédérée." -#: ../../source/explanation-flower-architecture.rst:125 +#: ../../source/explanation-flower-architecture.rst:115 msgid "" "Run 2 in a multi-run federated learning architecture with Flower. Only " "the first and third SuperNodes are selected to participate in the " "training round." msgstr "" -#: ../../source/explanation-flower-architecture.rst:129 +#: ../../source/explanation-flower-architecture.rst:118 msgid "" "Therefore, with Flower multi-run, different projects (each consisting of " "a ``ServerApp`` and ``ClientApp``) can run on different sets of clients." msgstr "" -#: ../../source/explanation-flower-architecture.rst:132 +#: ../../source/explanation-flower-architecture.rst:121 msgid "" "To help you start and manage all of the concurrently executing training " "runs, Flower offers one additional long-running server-side service " @@ -5448,28 +5668,28 @@ msgid "" "``ClientApp``." msgstr "" -#: ../../source/explanation-flower-architecture.rst:141 +#: ../../source/explanation-flower-architecture.rst:128 msgid "" "This architecture allows many users to (concurrently) run their projects " "on the same federation, simply by typing ``flwr run`` on their local " "developer machine." 
msgstr "" -#: ../../source/explanation-flower-architecture.rst:151 +#: ../../source/explanation-flower-architecture.rst:137 msgid "Flower Deployment Engine with SuperExec" msgstr "" -#: ../../source/explanation-flower-architecture.rst:151 +#: ../../source/explanation-flower-architecture.rst:137 msgid "The SuperExec service for managing concurrent training runs in Flower." msgstr "" -#: ../../source/explanation-flower-architecture.rst:156 +#: ../../source/explanation-flower-architecture.rst:141 msgid "" "This explanation covers the Flower Deployment Engine. An explanation " "covering the Flower Simulation Engine will follow." msgstr "" -#: ../../source/explanation-flower-architecture.rst:161 +#: ../../source/explanation-flower-architecture.rst:146 msgid "" "As we continue to enhance Flower at a rapid pace, we'll periodically " "update this explainer document. Feel free to share any feedback with us." @@ -6071,9 +6291,10 @@ msgid "Aggregate Custom Evaluation Results" msgstr "Agréger les résultats de l'évaluation personnalisée" #: ../../source/how-to-aggregate-evaluation-results.rst:10 +#, fuzzy msgid "" -"The same :code:`Strategy`-customization approach can be used to aggregate" -" custom evaluation results coming from individual clients. Clients can " +"The same ``Strategy``-customization approach can be used to aggregate " +"custom evaluation results coming from individual clients. Clients can " "return custom metrics to the server by returning a dictionary:" msgstr "" "La même approche de personnalisation :code:`Stratégie` peut être utilisée" @@ -6081,7 +6302,7 @@ msgstr "" "clients individuels. 
Les clients peuvent renvoyer des mesures " "personnalisées au serveur en renvoyant un dictionnaire :" -#: ../../source/how-to-aggregate-evaluation-results.rst:36 +#: ../../source/how-to-aggregate-evaluation-results.rst:39 msgid "" "The server can then use a customized strategy to aggregate the metrics " "provided in these dictionaries:" @@ -6101,27 +6322,27 @@ msgid "" " works:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:7 +#: ../../source/how-to-authenticate-supernodes.rst:8 msgid "SuperLink (server) stores a list of known (client) node public keys" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:8 +#: ../../source/how-to-authenticate-supernodes.rst:9 msgid "" "Using ECDH, both SuperNode and SuperLink independently derive a shared " "secret" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:9 +#: ../../source/how-to-authenticate-supernodes.rst:10 msgid "" "Shared secret is used to compute the HMAC value of the message sent from " "SuperNode to SuperLink as a token" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:10 +#: ../../source/how-to-authenticate-supernodes.rst:12 msgid "SuperLink verifies the token" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:12 +#: ../../source/how-to-authenticate-supernodes.rst:14 #, fuzzy msgid "" "We recommend you to check out the complete `code example " @@ -6133,47 +6354,46 @@ msgstr "" "`_ " "pour en savoir plus." -#: ../../source/how-to-authenticate-supernodes.rst:15 +#: ../../source/how-to-authenticate-supernodes.rst:20 msgid "" "This guide covers a preview feature that might change in future versions " "of Flower." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:18 +#: ../../source/how-to-authenticate-supernodes.rst:24 msgid "" "For increased security, node authentication can only be used when " "encrypted connections (SSL/TLS) are enabled." 
msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:21 -msgid "Enable node authentication in :code:`SuperLink`" +#: ../../source/how-to-authenticate-supernodes.rst:28 +msgid "Enable node authentication in ``SuperLink``" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:23 +#: ../../source/how-to-authenticate-supernodes.rst:30 msgid "" "To enable node authentication, first you need to configure SSL/TLS " "connections to secure the SuperLink<>SuperNode communication. You can " "find the complete guide `here `_. After configuring secure connections, you" -" can enable client authentication in a long-running Flower " -":code:`SuperLink`. Use the following terminal command to start a Flower " -":code:`SuperNode` that has both secure connections and node " -"authentication enabled:" +" can enable client authentication in a long-running Flower ``SuperLink``." +" Use the following terminal command to start a Flower ``SuperNode`` that " +"has both secure connections and node authentication enabled:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:38 +#: ../../source/how-to-authenticate-supernodes.rst:47 msgid "Let's break down the authentication flags:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:40 +#: ../../source/how-to-authenticate-supernodes.rst:49 msgid "" -"The first flag :code:`--auth-list-public-keys` expects a path to a CSV " -"file storing all known node public keys. You need to store all known node" -" public keys that are allowed to participate in a federation in one CSV " -"file (:code:`.csv`)." +"The first flag ``--auth-list-public-keys`` expects a path to a CSV file " +"storing all known node public keys. You need to store all known node " +"public keys that are allowed to participate in a federation in one CSV " +"file (``.csv``)." 
msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:42 +#: ../../source/how-to-authenticate-supernodes.rst:53 msgid "" "A valid CSV file storing known node public keys should list the keys in " "OpenSSH format, separated by commas and without any comments. For an " @@ -6181,15 +6401,15 @@ msgid "" "known node public keys." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:44 +#: ../../source/how-to-authenticate-supernodes.rst:57 msgid "" -"The second and third flags :code:`--auth-superlink-private-key` and :code" -":`--auth-superlink-public-key` expect paths to the server's private and " -"public keys. For development purposes, you can generate a private and " -"public key pair using :code:`ssh-keygen -t ecdsa -b 384`." +"The second and third flags ``--auth-superlink-private-key`` and ``--auth-" +"superlink-public-key`` expect paths to the server's private and public " +"keys. For development purposes, you can generate a private and public key" +" pair using ``ssh-keygen -t ecdsa -b 384``." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:47 +#: ../../source/how-to-authenticate-supernodes.rst:64 msgid "" "In Flower 1.9, there is no support for dynamically removing, editing, or " "adding known node public keys to the SuperLink. To change the set of " @@ -6198,32 +6418,32 @@ msgid "" " nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:53 -msgid "Enable node authentication in :code:`SuperNode`" +#: ../../source/how-to-authenticate-supernodes.rst:71 +msgid "Enable node authentication in ``SuperNode``" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:55 +#: ../../source/how-to-authenticate-supernodes.rst:73 msgid "" -"Similar to the long-running Flower server (:code:`SuperLink`), you can " -"easily enable node authentication in the long-running Flower client " -"(:code:`SuperNode`). 
Use the following terminal command to start an " -"authenticated :code:`SuperNode`:" +"Similar to the long-running Flower server (``SuperLink``), you can easily" +" enable node authentication in the long-running Flower client " +"(``SuperNode``). Use the following terminal command to start an " +"authenticated ``SuperNode``:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:66 +#: ../../source/how-to-authenticate-supernodes.rst:85 msgid "" -"The :code:`--auth-supernode-private-key` flag expects a path to the " -"node's private key file and the :code:`--auth-supernode-public-key` flag " -"expects a path to the node's public key file. For development purposes, " -"you can generate a private and public key pair using :code:`ssh-keygen -t" -" ecdsa -b 384`." +"The ``--auth-supernode-private-key`` flag expects a path to the node's " +"private key file and the ``--auth-supernode-public-key`` flag expects a " +"path to the node's public key file. For development purposes, you can " +"generate a private and public key pair using ``ssh-keygen -t ecdsa -b " +"384``." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:70 +#: ../../source/how-to-authenticate-supernodes.rst:91 msgid "Security notice" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:72 +#: ../../source/how-to-authenticate-supernodes.rst:93 msgid "" "The system's security relies on the credentials of the SuperLink and each" " SuperNode. Therefore, it is imperative to safeguard and safely store the" @@ -6234,19 +6454,19 @@ msgid "" "methods." 
msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:77 -#: ../../source/how-to-enable-ssl-connections.rst:68 -#: ../../source/how-to-use-built-in-mods.rst:85 +#: ../../source/how-to-authenticate-supernodes.rst:100 +#: ../../source/how-to-enable-ssl-connections.rst:71 +#: ../../source/how-to-use-built-in-mods.rst:95 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 msgid "Conclusion" msgstr "Conclusion" -#: ../../source/how-to-authenticate-supernodes.rst:79 +#: ../../source/how-to-authenticate-supernodes.rst:102 msgid "" "You should now have learned how to start a long-running Flower server " -"(:code:`SuperLink`) and client (:code:`SuperNode`) with node " -"authentication enabled. You should also know the significance of the " -"private key and store it safely to minimize security risks." +"(``SuperLink``) and client (``SuperNode``) with node authentication " +"enabled. You should also know the significance of the private key and " +"store it safely to minimize security risks." msgstr "" #: ../../source/how-to-configure-clients.rst:2 @@ -6267,11 +6487,11 @@ msgstr "" "populaire de contrôler les hyperparamètres côté client à partir du " "serveur." -#: ../../source/how-to-configure-clients.rst:7 +#: ../../source/how-to-configure-clients.rst:9 msgid "Configuration values" msgstr "Valeurs de configuration" -#: ../../source/how-to-configure-clients.rst:9 +#: ../../source/how-to-configure-clients.rst:11 msgid "" "Configuration values are represented as a dictionary with ``str`` keys " "and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " @@ -6284,7 +6504,7 @@ msgstr "" "dans d'autres langages). 
Voici un exemple de dictionnaire de " "configuration en Python :" -#: ../../source/how-to-configure-clients.rst:20 +#: ../../source/how-to-configure-clients.rst:25 msgid "" "Flower serializes these configuration dictionaries (or *config dict* for " "short) to their ProtoBuf representation, transports them to the client " @@ -6294,7 +6514,7 @@ msgstr "" "abrégé) dans leur représentation ProtoBuf, les transporte vers le client " "à l'aide de gRPC, puis les désérialise à nouveau en dictionnaires Python." -#: ../../source/how-to-configure-clients.rst:24 +#: ../../source/how-to-configure-clients.rst:31 msgid "" "Currently, there is no support for directly sending collection types " "(e.g., ``Set``, ``List``, ``Map``) as values in configuration " @@ -6309,7 +6529,7 @@ msgstr "" "l'un des types de valeurs pris en charge (et en les reconvertissant du " "côté client)." -#: ../../source/how-to-configure-clients.rst:26 +#: ../../source/how-to-configure-clients.rst:36 msgid "" "One can, for example, convert a list of floating-point numbers to a JSON " "string, then send the JSON string using the configuration dictionary, and" @@ -6321,18 +6541,19 @@ msgstr "" "de configuration, et enfin reconvertir la chaîne JSON en une liste de " "nombres à virgule flottante sur le client." -#: ../../source/how-to-configure-clients.rst:30 +#: ../../source/how-to-configure-clients.rst:41 msgid "Configuration through built-in strategies" msgstr "Configuration par le biais de stratégies intégrées" -#: ../../source/how-to-configure-clients.rst:32 +#: ../../source/how-to-configure-clients.rst:43 +#, fuzzy msgid "" "The easiest way to send configuration values to clients is to use a " -"built-in strategy like :code:`FedAvg`. Built-in strategies support so-" -"called configuration functions. A configuration function is a function " -"that the built-in strategy calls to get the configuration dictionary for " -"the current round. 
It then forwards the configuration dictionary to all " -"the clients selected during that round." +"built-in strategy like ``FedAvg``. Built-in strategies support so-called " +"configuration functions. A configuration function is a function that the " +"built-in strategy calls to get the configuration dictionary for the " +"current round. It then forwards the configuration dictionary to all the " +"clients selected during that round." msgstr "" "La façon la plus simple d'envoyer des valeurs de configuration aux " "clients est d'utiliser une stratégie intégrée comme :code:`FedAvg`. Les " @@ -6342,7 +6563,7 @@ msgstr "" "pour le tour en cours. Elle transmet ensuite le dictionnaire de " "configuration à tous les clients sélectionnés au cours de ce tour." -#: ../../source/how-to-configure-clients.rst:34 +#: ../../source/how-to-configure-clients.rst:49 msgid "" "Let's start with a simple example. Imagine we want to send (a) the batch " "size that the client should use, (b) the current global round of " @@ -6354,21 +6575,22 @@ msgstr "" " de l'apprentissage fédéré et (c) le nombre d'époques à former du côté " "client. 
Notre fonction de configuration pourrait ressembler à ceci :" -#: ../../source/how-to-configure-clients.rst:47 +#: ../../source/how-to-configure-clients.rst:65 +#, fuzzy msgid "" "To make the built-in strategies use this function, we can pass it to " "``FedAvg`` during initialization using the parameter " -":code:`on_fit_config_fn`:" +"``on_fit_config_fn``:" msgstr "" "Pour que les stratégies intégrées utilisent cette fonction, nous pouvons " "la passer à ``FedAvg`` lors de l'initialisation en utilisant le paramètre" " :code:`on_fit_config_fn` :" -#: ../../source/how-to-configure-clients.rst:56 +#: ../../source/how-to-configure-clients.rst:75 msgid "One the client side, we receive the configuration dictionary in ``fit``:" msgstr "Côté client, nous recevons le dictionnaire de configuration dans ``fit`` :" -#: ../../source/how-to-configure-clients.rst:67 +#: ../../source/how-to-configure-clients.rst:86 msgid "" "There is also an `on_evaluate_config_fn` to configure evaluation, which " "works the same way. They are separate functions because one might want to" @@ -6380,7 +6602,7 @@ msgstr "" "séparées car on peut vouloir envoyer différentes valeurs de configuration" " à `evaluate` (par exemple, pour utiliser une taille de lot différente)." -#: ../../source/how-to-configure-clients.rst:69 +#: ../../source/how-to-configure-clients.rst:90 msgid "" "The built-in strategies call this function every round (that is, every " "time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " @@ -6397,15 +6619,16 @@ msgstr "" "d'hyperparamètres, par exemple, pour augmenter le nombre d'époques " "locales au cours des derniers tours, nous pourrions faire ce qui suit :" -#: ../../source/how-to-configure-clients.rst:82 -msgid "The :code:`FedAvg` strategy will call this function *every round*." +#: ../../source/how-to-configure-clients.rst:107 +#, fuzzy +msgid "The ``FedAvg`` strategy will call this function *every round*." 
msgstr "La stratégie :code:`FedAvg` appellera cette fonction *à chaque tour*." -#: ../../source/how-to-configure-clients.rst:85 +#: ../../source/how-to-configure-clients.rst:110 msgid "Configuring individual clients" msgstr "Configuration des clients individuels" -#: ../../source/how-to-configure-clients.rst:87 +#: ../../source/how-to-configure-clients.rst:112 msgid "" "In some cases, it is necessary to send different configuration values to " "different clients." @@ -6413,16 +6636,16 @@ msgstr "" "Dans certains cas, il est nécessaire d'envoyer des valeurs de " "configuration différentes à des clients différents." -#: ../../source/how-to-configure-clients.rst:89 +#: ../../source/how-to-configure-clients.rst:115 #, fuzzy msgid "" "This can be achieved by customizing an existing strategy or by " ":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes :code:`FedAvg`" -" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" -" to the config dict of a *single client* (only the first client in the " -"list, the other clients in this round to not receive this \"special\" " -"config value):" +"strategies>`. Here's a nonsensical example that customizes ``FedAvg`` by " +"adding a custom ``\"hello\": \"world\"`` configuration key/value pair to " +"the config dict of a *single client* (only the first client in the list, " +"the other clients in this round to not receive this \"special\" config " +"value):" msgstr "" "Ceci peut être réalisé en personnalisant une stratégie existante ou en " "`mettant en œuvre une stratégie personnalisée à partir de zéro " @@ -6448,34 +6671,35 @@ msgstr "" #: ../../source/how-to-configure-logging.rst:13 msgid "" "containing relevant information including: log message level (e.g. " -":code:`INFO`, :code:`DEBUG`), a timestamp, the line where the logging " -"took place from, as well as the log message itself. 
In this way, the " -"logger would typically display information on your terminal as follows:" +"``INFO``, ``DEBUG``), a timestamp, the line where the logging took place " +"from, as well as the log message itself. In this way, the logger would " +"typically display information on your terminal as follows:" msgstr "" -#: ../../source/how-to-configure-logging.rst:34 +#: ../../source/how-to-configure-logging.rst:35 msgid "Saving log to file" msgstr "" -#: ../../source/how-to-configure-logging.rst:36 +#: ../../source/how-to-configure-logging.rst:37 msgid "" "By default, the Flower log is outputted to the terminal where you launch " "your Federated Learning workload from. This applies for both gRPC-based " -"federation (i.e. when you do :code:`fl.server.start_server`) and when " -"using the :code:`VirtualClientEngine` (i.e. when you do " -":code:`fl.simulation.start_simulation`). In some situations you might " -"want to save this log to disk. You can do so by calling the " +"federation (i.e. when you do ``fl.server.start_server``) and when using " +"the ``VirtualClientEngine`` (i.e. when you do " +"``fl.simulation.start_simulation``). In some situations you might want to" +" save this log to disk. You can do so by calling the " "`fl.common.logger.configure() " "`_" " function. For example:" msgstr "" -#: ../../source/how-to-configure-logging.rst:53 +#: ../../source/how-to-configure-logging.rst:59 +#, fuzzy msgid "" "With the above, Flower will record the log you see on your terminal to " -":code:`log.txt`. This file will be created in the same directory as were " -"you are running the code from. If we inspect we see the log above is also" -" recorded but prefixing with :code:`identifier` each line:" +"``log.txt``. This file will be created in the same directory as were you " +"are running the code from. 
If we inspect we see the log above is also " +"recorded but prefixing with ``identifier`` each line:" msgstr "" "Avec ce qui précède, Flower enregistrera le log que vous voyez sur votre " "terminal dans :code:`log.txt`. Ce fichier sera créé dans le répertoire " @@ -6483,38 +6707,37 @@ msgstr "" "log ci-dessous est également enregistré mais préfixé avec " ":code:`identifier` sur chaque ligne :" -#: ../../source/how-to-configure-logging.rst:74 +#: ../../source/how-to-configure-logging.rst:81 msgid "Log your own messages" msgstr "Loggez vos propres messages" -#: ../../source/how-to-configure-logging.rst:76 +#: ../../source/how-to-configure-logging.rst:83 msgid "" "You might expand the information shown by default with the Flower logger " "by adding more messages relevant to your application. You can achieve " "this easily as follows." msgstr "" -#: ../../source/how-to-configure-logging.rst:102 +#: ../../source/how-to-configure-logging.rst:114 msgid "" "In this way your logger will show, in addition to the default messages, " "the ones introduced by the clients as specified above." msgstr "" -#: ../../source/how-to-configure-logging.rst:128 +#: ../../source/how-to-configure-logging.rst:140 msgid "Log to a remote service" msgstr "" -#: ../../source/how-to-configure-logging.rst:130 +#: ../../source/how-to-configure-logging.rst:142 msgid "" -"The :code:`fl.common.logger.configure` function, also allows specifying a" -" host to which logs can be pushed (via :code:`POST`) through a native " -"Python :code:`logging.handler.HTTPHandler`. This is a particularly useful" -" feature in :code:`gRPC`-based Federated Learning workloads where " -"otherwise gathering logs from all entities (i.e. the server and the " -"clients) might be cumbersome. Note that in Flower simulation, the server " -"automatically displays all logs. You can still specify a " -":code:`HTTPHandler` should you wish to backup or analyze the logs " -"somewhere else." 
+"The ``fl.common.logger.configure`` function, also allows specifying a " +"host to which logs can be pushed (via ``POST``) through a native Python " +"``logging.handler.HTTPHandler``. This is a particularly useful feature in" +" ``gRPC``-based Federated Learning workloads where otherwise gathering " +"logs from all entities (i.e. the server and the clients) might be " +"cumbersome. Note that in Flower simulation, the server automatically " +"displays all logs. You can still specify a ``HTTPHandler`` should you " +"wish to backup or analyze the logs somewhere else." msgstr "" #: ../../source/how-to-enable-ssl-connections.rst:2 @@ -6526,13 +6749,13 @@ msgstr "Collecte centralisée des données" #, fuzzy msgid "" "This guide describes how to a SSL-enabled secure Flower server " -"(:code:`SuperLink`) can be started and how a Flower client " -"(:code:`SuperNode`) can establish a secure connections to it." +"(``SuperLink``) can be started and how a Flower client (``SuperNode``) " +"can establish a secure connections to it." msgstr "" "Ce guide décrit comment démarrer un serveur Flower sécurisé par SSL et " "comment un client Flower peut établir une connexion sécurisée avec lui." -#: ../../source/how-to-enable-ssl-connections.rst:7 +#: ../../source/how-to-enable-ssl-connections.rst:8 #, fuzzy msgid "" "A complete code example demonstrating a secure connection can be found " @@ -6543,11 +6766,11 @@ msgstr "" "trouvé ici `_." -#: ../../source/how-to-enable-ssl-connections.rst:10 +#: ../../source/how-to-enable-ssl-connections.rst:11 #, fuzzy msgid "" -"The code example comes with a :code:`README.md` file which explains how " -"to start it. Although it is already SSL-enabled, it might be less " +"The code example comes with a ``README.md`` file which explains how to " +"start it. Although it is already SSL-enabled, it might be less " "descriptive on how it does so. Stick to this guide for a deeper " "introduction to the topic." 
msgstr "" @@ -6566,8 +6789,9 @@ msgid "" "Using SSL-enabled connections requires certificates to be passed to the " "server and client. For the purpose of this guide we are going to generate" " self-signed certificates. As this can become quite complex we are going " -"to ask you to run the script in :code:`examples/advanced-" -"tensorflow/certificates/generate.sh` with the following command sequence:" +"to ask you to run the script in ``examples/advanced-" +"tensorflow/certificates/generate.sh`` with the following command " +"sequence:" msgstr "" "L'utilisation de connexions compatibles avec le protocole SSL nécessite " "que des certificats soient transmis au serveur et au client. Pour les " @@ -6579,13 +6803,13 @@ msgstr "" #: ../../source/how-to-enable-ssl-connections.rst:29 #, fuzzy msgid "" -"This will generate the certificates in :code:`examples/advanced-" -"tensorflow/.cache/certificates`." +"This will generate the certificates in ``examples/advanced-" +"tensorflow/.cache/certificates``." msgstr "" "Cela générera les certificats dans :code:`examples/advanced-" "tensorflow/.cache/certificates`." -#: ../../source/how-to-enable-ssl-connections.rst:31 +#: ../../source/how-to-enable-ssl-connections.rst:32 #, fuzzy msgid "" "The approach for generating SSL certificates in the context of this " @@ -6600,12 +6824,12 @@ msgstr "" "servir d'inspiration et de point de départ, mais ne doit pas être " "considérée comme complète pour les environnements de production." 
-#: ../../source/how-to-enable-ssl-connections.rst:39 +#: ../../source/how-to-enable-ssl-connections.rst:40 #, fuzzy msgid "Server (SuperLink)" msgstr "flower-superlink" -#: ../../source/how-to-enable-ssl-connections.rst:41 +#: ../../source/how-to-enable-ssl-connections.rst:42 #, fuzzy msgid "" "Use the following terminal command to start a sever (SuperLink) that uses" @@ -6614,19 +6838,19 @@ msgstr "" "Nous allons maintenant montrer comment écrire un client qui utilise les " "scripts générés précédemment :" -#: ../../source/how-to-enable-ssl-connections.rst:50 +#: ../../source/how-to-enable-ssl-connections.rst:52 msgid "" "When providing certificates, the server expects a tuple of three " "certificates paths: CA certificate, server certificate and server private" " key." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:54 +#: ../../source/how-to-enable-ssl-connections.rst:56 #, fuzzy msgid "Client (SuperNode)" msgstr "Codes d'état du client." -#: ../../source/how-to-enable-ssl-connections.rst:56 +#: ../../source/how-to-enable-ssl-connections.rst:58 #, fuzzy msgid "" "Use the following terminal command to start a client (SuperNode) that " @@ -6635,18 +6859,18 @@ msgstr "" "Nous allons maintenant montrer comment écrire un client qui utilise les " "scripts générés précédemment :" -#: ../../source/how-to-enable-ssl-connections.rst:64 +#: ../../source/how-to-enable-ssl-connections.rst:67 #, fuzzy msgid "" -"When setting :code:`root_certificates`, the client expects a file path to" -" PEM-encoded root certificates." +"When setting ``root_certificates``, the client expects a file path to " +"PEM-encoded root certificates." msgstr "" "En définissant :code:`root_certificates`, le client s'attend à recevoir " "les certificats racine codés en PEM sous forme de chaîne d'octets. Nous " "utilisons à nouveau :code:`Path` pour simplifier la lecture de ces " "certificats sous forme de chaînes d'octets." 
-#: ../../source/how-to-enable-ssl-connections.rst:70 +#: ../../source/how-to-enable-ssl-connections.rst:73 #, fuzzy msgid "" "You should now have learned how to generate self-signed certificates " @@ -6657,12 +6881,12 @@ msgstr "" "à l'aide du script donné, à démarrer un serveur compatible SSL et à " "demander à un client d'établir une connexion sécurisée avec lui." -#: ../../source/how-to-enable-ssl-connections.rst:75 +#: ../../source/how-to-enable-ssl-connections.rst:78 #, fuzzy msgid "Additional resources" msgstr "Ressources supplémentaires" -#: ../../source/how-to-enable-ssl-connections.rst:77 +#: ../../source/how-to-enable-ssl-connections.rst:80 msgid "" "These additional sources might be relevant if you would like to dive " "deeper into the topic of certificates:" @@ -6670,11 +6894,11 @@ msgstr "" "Ces sources supplémentaires peuvent être pertinentes si tu souhaites " "approfondir le sujet des certificats :" -#: ../../source/how-to-enable-ssl-connections.rst:79 +#: ../../source/how-to-enable-ssl-connections.rst:83 msgid "`Let's Encrypt `_" msgstr "`Let's Encrypt `_" -#: ../../source/how-to-enable-ssl-connections.rst:80 +#: ../../source/how-to-enable-ssl-connections.rst:84 msgid "`certbot `_" msgstr "`certbot `_" @@ -6701,13 +6925,15 @@ msgstr "" "intégrées qui sont basées sur la même API que celle décrite ci-dessous." #: ../../source/how-to-implement-strategies.rst:11 -msgid "The :code:`Strategy` abstraction" +#, fuzzy +msgid "The ``Strategy`` abstraction" msgstr "L'abstraction :code:`Stratégie`" #: ../../source/how-to-implement-strategies.rst:13 +#, fuzzy msgid "" "All strategy implementation are derived from the abstract base class " -":code:`flwr.server.strategy.Strategy`, both built-in implementations and " +"``flwr.server.strategy.Strategy``, both built-in implementations and " "third party implementations. This means that custom strategy " "implementations have the exact same capabilities at their disposal as " "built-in ones." 
@@ -6726,60 +6952,65 @@ msgstr "" "L'abstraction de la stratégie définit quelques méthodes abstraites qui " "doivent être mises en œuvre :" -#: ../../source/how-to-implement-strategies.rst:75 +#: ../../source/how-to-implement-strategies.rst:67 +#, fuzzy msgid "" -"Creating a new strategy means implementing a new :code:`class` (derived " -"from the abstract base class :code:`Strategy`) that implements for the " -"previously shown abstract methods:" +"Creating a new strategy means implementing a new ``class`` (derived from " +"the abstract base class ``Strategy``) that implements for the previously " +"shown abstract methods:" msgstr "" "La création d'une nouvelle stratégie implique la mise en œuvre d'une " "nouvelle :code:`classe` (dérivée de la classe de base abstraite " ":code:`Stratégie`) qui met en œuvre les méthodes abstraites présentées " "précédemment :" -#: ../../source/how-to-implement-strategies.rst:100 +#: ../../source/how-to-implement-strategies.rst:97 msgid "The Flower server calls these methods in the following order:" msgstr "Le serveur Flower appelle ces méthodes dans l'ordre suivant :" -#: ../../source/how-to-implement-strategies.rst:177 +#: ../../source/how-to-implement-strategies.rst:174 msgid "The following sections describe each of those methods in more detail." msgstr "Les sections suivantes décrivent chacune de ces méthodes plus en détail." -#: ../../source/how-to-implement-strategies.rst:180 -msgid "The :code:`initialize_parameters` method" +#: ../../source/how-to-implement-strategies.rst:177 +#, fuzzy +msgid "The ``initialize_parameters`` method" msgstr "La méthode :code:`initialize_parameters` (initialisation des paramètres)" -#: ../../source/how-to-implement-strategies.rst:182 +#: ../../source/how-to-implement-strategies.rst:179 +#, fuzzy msgid "" -":code:`initialize_parameters` is called only once, at the very beginning " -"of an execution. 
It is responsible for providing the initial global model" -" parameters in a serialized form (i.e., as a :code:`Parameters` object)." +"``initialize_parameters`` is called only once, at the very beginning of " +"an execution. It is responsible for providing the initial global model " +"parameters in a serialized form (i.e., as a ``Parameters`` object)." msgstr "" ":code:`initialize_parameters` n'est appelé qu'une seule fois, au tout " "début d'une exécution. Il est chargé de fournir les paramètres initiaux " "du modèle global sous une forme sérialisée (c'est-à-dire sous la forme " "d'un objet :code:`Parameters`)." -#: ../../source/how-to-implement-strategies.rst:184 +#: ../../source/how-to-implement-strategies.rst:183 +#, fuzzy msgid "" "Built-in strategies return user-provided initial parameters. The " "following example shows how initial parameters can be passed to " -":code:`FedAvg`:" +"``FedAvg``:" msgstr "" "Les stratégies intégrées renvoient les paramètres initiaux fournis par " "l'utilisateur. L'exemple suivant montre comment les paramètres initiaux " "peuvent être transmis à :code:`FedAvg` :" #: ../../source/how-to-implement-strategies.rst:209 +#, fuzzy msgid "" -"The Flower server will call :code:`initialize_parameters`, which either " -"returns the parameters that were passed to :code:`initial_parameters`, or" -" :code:`None`. If no parameters are returned from " -":code:`initialize_parameters` (i.e., :code:`None`), the server will " -"randomly select one client and ask it to provide its parameters. This is " -"a convenience feature and not recommended in practice, but it can be " -"useful for prototyping. In practice, it is recommended to always use " -"server-side parameter initialization." +"The Flower server will call ``initialize_parameters``, which either " +"returns the parameters that were passed to ``initial_parameters``, or " +"``None``. 
If no parameters are returned from ``initialize_parameters`` " +"(i.e., ``None``), the server will randomly select one client and ask it " +"to provide its parameters. This is a convenience feature and not " +"recommended in practice, but it can be useful for prototyping. In " +"practice, it is recommended to always use server-side parameter " +"initialization." msgstr "" "Le serveur Flower appelle :code:`initialize_parameters`, qui renvoie les " "paramètres passés à :code:`initial_parameters`, ou :code:`None`. Si aucun" @@ -6790,7 +7021,7 @@ msgstr "" "prototypage. Dans la pratique, il est recommandé de toujours utiliser " "l'initialisation des paramètres du côté du serveur." -#: ../../source/how-to-implement-strategies.rst:213 +#: ../../source/how-to-implement-strategies.rst:218 msgid "" "Server-side parameter initialization is a powerful mechanism. It can be " "used, for example, to resume training from a previously saved checkpoint." @@ -6805,58 +7036,63 @@ msgstr "" "hybrides, par exemple, pour affiner un modèle pré-entraîné à l'aide de " "l'apprentissage fédéré." -#: ../../source/how-to-implement-strategies.rst:216 -msgid "The :code:`configure_fit` method" +#: ../../source/how-to-implement-strategies.rst:224 +#, fuzzy +msgid "The ``configure_fit`` method" msgstr "La méthode :code:`configure_fit`" -#: ../../source/how-to-implement-strategies.rst:218 +#: ../../source/how-to-implement-strategies.rst:226 +#, fuzzy msgid "" -":code:`configure_fit` is responsible for configuring the upcoming round " -"of training. What does *configure* mean in this context? Configuring a " -"round means selecting clients and deciding what instructions to send to " -"these clients. The signature of :code:`configure_fit` makes this clear:" +"``configure_fit`` is responsible for configuring the upcoming round of " +"training. What does *configure* mean in this context? Configuring a round" +" means selecting clients and deciding what instructions to send to these " +"clients. 
The signature of ``configure_fit`` makes this clear:" msgstr "" ":code:`configure_fit` est chargé de configurer le prochain tour de " "formation. Que signifie *configurer* dans ce contexte ? Configurer un " "tour signifie sélectionner des clients et décider des instructions à leur" " envoyer. La signature de :code:`configure_fit` l'indique clairement :" -#: ../../source/how-to-implement-strategies.rst:231 +#: ../../source/how-to-implement-strategies.rst:239 +#, fuzzy msgid "" "The return value is a list of tuples, each representing the instructions " "that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_fit`:" +"usually perform the following steps in ``configure_fit``:" msgstr "" "La valeur de retour est une liste de tuples, chacun représentant les " "instructions qui seront envoyées à un client particulier. Les " "implémentations de stratégies effectuent généralement les étapes " "suivantes dans :code:`configure_fit` :" -#: ../../source/how-to-implement-strategies.rst:233 -#: ../../source/how-to-implement-strategies.rst:280 +#: ../../source/how-to-implement-strategies.rst:243 +#: ../../source/how-to-implement-strategies.rst:307 +#, fuzzy msgid "" -"Use the :code:`client_manager` to randomly sample all (or a subset of) " -"available clients (each represented as a :code:`ClientProxy` object)" +"Use the ``client_manager`` to randomly sample all (or a subset of) " +"available clients (each represented as a ``ClientProxy`` object)" msgstr "" "Utilise le :code:`client_manager` pour échantillonner au hasard tous les " "clients disponibles (ou un sous-ensemble d'entre eux) (chacun représenté " "par un objet :code:`ClientProxy`)" -#: ../../source/how-to-implement-strategies.rst:234 +#: ../../source/how-to-implement-strategies.rst:245 +#, fuzzy msgid "" -"Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " -"current global model :code:`parameters` and :code:`config` dict" +"Pair 
each ``ClientProxy`` with the same ``FitIns`` holding the current " +"global model ``parameters`` and ``config`` dict" msgstr "" "Associe chaque :code:`ClientProxy` au même :code:`FitIns` contenant le " "modèle global actuel :code:`parameters` et :code:`config` dict" -#: ../../source/how-to-implement-strategies.rst:236 +#: ../../source/how-to-implement-strategies.rst:248 #, fuzzy msgid "" -"More sophisticated implementations can use :code:`configure_fit` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_fit`." +"More sophisticated implementations can use ``configure_fit`` to implement" +" custom client selection logic. A client will only participate in a round" +" if the corresponding ``ClientProxy`` is included in the list returned " +"from ``configure_fit``." msgstr "" "Les implémentations plus sophistiquées peuvent utiliser " ":code:`configure_fit` pour mettre en œuvre une logique de sélection des " @@ -6864,14 +7100,14 @@ msgstr "" ":code:`ClientProxy` correspondant est inclus dans la liste renvoyée par " ":code:`configure_fit`." -#: ../../source/how-to-implement-strategies.rst:240 +#: ../../source/how-to-implement-strategies.rst:254 +#, fuzzy msgid "" "The structure of this return value provides a lot of flexibility to the " "user. Since instructions are defined on a per-client basis, different " "instructions can be sent to each client. This enables custom strategies " "to train, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." +"different hyperparameters on different clients (via the ``config`` dict)." msgstr "" "La structure de cette valeur de retour offre beaucoup de souplesse à " "l'utilisateur. 
Comme les instructions sont définies par client, des " @@ -6880,26 +7116,28 @@ msgstr "" " différents modèles sur différents clients, ou utiliser différents " "hyperparamètres sur différents clients (via le dict :code:`config`)." -#: ../../source/how-to-implement-strategies.rst:243 -msgid "The :code:`aggregate_fit` method" +#: ../../source/how-to-implement-strategies.rst:261 +#, fuzzy +msgid "The ``aggregate_fit`` method" msgstr "La méthode :code:`aggregate_fit` (agrégation)" -#: ../../source/how-to-implement-strategies.rst:245 +#: ../../source/how-to-implement-strategies.rst:263 +#, fuzzy msgid "" -":code:`aggregate_fit` is responsible for aggregating the results returned" -" by the clients that were selected and asked to train in " -":code:`configure_fit`." +"``aggregate_fit`` is responsible for aggregating the results returned by " +"the clients that were selected and asked to train in ``configure_fit``." msgstr "" ":code:`aggregate_fit` est chargé d'agréger les résultats renvoyés par les" " clients qui ont été sélectionnés et à qui on a demandé de s'entraîner " "dans :code:`configure_fit`." -#: ../../source/how-to-implement-strategies.rst:258 +#: ../../source/how-to-implement-strategies.rst:277 +#, fuzzy msgid "" "Of course, failures can happen, so there is no guarantee that the server " "will get results from all the clients it sent instructions to (via " -":code:`configure_fit`). :code:`aggregate_fit` therefore receives a list " -"of :code:`results`, but also a list of :code:`failures`." +"``configure_fit``). ``aggregate_fit`` therefore receives a list of " +"``results``, but also a list of ``failures``." msgstr "" "Bien sûr, des échecs peuvent se produire, il n'y a donc aucune garantie " "que le serveur obtienne des résultats de tous les clients auxquels il a " @@ -6907,12 +7145,13 @@ msgstr "" ":code:`aggregate_fit` reçoit donc une liste de :code:`résultats`, mais " "aussi une liste de :code:`échecs`." 
-#: ../../source/how-to-implement-strategies.rst:260 +#: ../../source/how-to-implement-strategies.rst:282 +#, fuzzy msgid "" -":code:`aggregate_fit` returns an optional :code:`Parameters` object and a" -" dictionary of aggregated metrics. The :code:`Parameters` return value is" -" optional because :code:`aggregate_fit` might decide that the results " -"provided are not sufficient for aggregation (e.g., too many failures)." +"``aggregate_fit`` returns an optional ``Parameters`` object and a " +"dictionary of aggregated metrics. The ``Parameters`` return value is " +"optional because ``aggregate_fit`` might decide that the results provided" +" are not sufficient for aggregation (e.g., too many failures)." msgstr "" ":code:`aggregate_fit` renvoie un objet :code:`Parameters` facultatif et " "un dictionnaire de métriques agrégées. La valeur de retour " @@ -6920,17 +7159,18 @@ msgstr "" " que les résultats fournis ne sont pas suffisants pour l'agrégation (par " "exemple, trop d'échecs)." -#: ../../source/how-to-implement-strategies.rst:263 -msgid "The :code:`configure_evaluate` method" +#: ../../source/how-to-implement-strategies.rst:288 +#, fuzzy +msgid "The ``configure_evaluate`` method" msgstr "La méthode :code:`configure_evaluate` (en anglais)" -#: ../../source/how-to-implement-strategies.rst:265 +#: ../../source/how-to-implement-strategies.rst:290 +#, fuzzy msgid "" -":code:`configure_evaluate` is responsible for configuring the upcoming " -"round of evaluation. What does *configure* mean in this context? " -"Configuring a round means selecting clients and deciding what " -"instructions to send to these clients. The signature of " -":code:`configure_evaluate` makes this clear:" +"``configure_evaluate`` is responsible for configuring the upcoming round " +"of evaluation. What does *configure* mean in this context? Configuring a " +"round means selecting clients and deciding what instructions to send to " +"these clients. 
The signature of ``configure_evaluate`` makes this clear:" msgstr "" ":code:`configure_evaluate` est chargé de configurer le prochain tour " "d'évaluation. Que signifie *configurer* dans ce contexte ? Configurer un " @@ -6938,32 +7178,34 @@ msgstr "" " envoyer. La signature de :code:`configure_evaluate` l'indique clairement" " :" -#: ../../source/how-to-implement-strategies.rst:278 +#: ../../source/how-to-implement-strategies.rst:303 +#, fuzzy msgid "" "The return value is a list of tuples, each representing the instructions " "that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_evaluate`:" +"usually perform the following steps in ``configure_evaluate``:" msgstr "" "La valeur de retour est une liste de tuples, chacun représentant les " "instructions qui seront envoyées à un client particulier. Les " "implémentations de stratégies effectuent généralement les étapes " "suivantes dans :code:`configure_evaluate` :" -#: ../../source/how-to-implement-strategies.rst:281 +#: ../../source/how-to-implement-strategies.rst:309 +#, fuzzy msgid "" -"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding " -"the current global model :code:`parameters` and :code:`config` dict" +"Pair each ``ClientProxy`` with the same ``EvaluateIns`` holding the " +"current global model ``parameters`` and ``config`` dict" msgstr "" "Associe chaque :code:`ClientProxy` au même :code:`EvaluateIns` contenant " "le modèle global actuel :code:`parameters` et :code:`config` dict" -#: ../../source/how-to-implement-strategies.rst:283 +#: ../../source/how-to-implement-strategies.rst:312 #, fuzzy msgid "" -"More sophisticated implementations can use :code:`configure_evaluate` to " +"More sophisticated implementations can use ``configure_evaluate`` to " "implement custom client selection logic. 
A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_evaluate`." +"in a round if the corresponding ``ClientProxy`` is included in the list " +"returned from ``configure_evaluate``." msgstr "" "Les implémentations plus sophistiquées peuvent utiliser " ":code:`configure_evaluate` pour mettre en œuvre une logique de sélection " @@ -6971,14 +7213,14 @@ msgstr "" ":code:`ClientProxy` correspondant est inclus dans la liste renvoyée par " ":code:`configure_evaluate`." -#: ../../source/how-to-implement-strategies.rst:287 +#: ../../source/how-to-implement-strategies.rst:318 +#, fuzzy msgid "" "The structure of this return value provides a lot of flexibility to the " "user. Since instructions are defined on a per-client basis, different " "instructions can be sent to each client. This enables custom strategies " "to evaluate, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." +"different hyperparameters on different clients (via the ``config`` dict)." msgstr "" "La structure de cette valeur de retour offre beaucoup de souplesse à " "l'utilisateur. Comme les instructions sont définies par client, des " @@ -6987,26 +7229,29 @@ msgstr "" "modèles sur différents clients, ou d'utiliser différents hyperparamètres " "sur différents clients (via le dict :code:`config`)." 
-#: ../../source/how-to-implement-strategies.rst:291 -msgid "The :code:`aggregate_evaluate` method" +#: ../../source/how-to-implement-strategies.rst:325 +#, fuzzy +msgid "The ``aggregate_evaluate`` method" msgstr "La méthode :code:`aggregate_evaluate` (agréger_évaluer)" -#: ../../source/how-to-implement-strategies.rst:293 +#: ../../source/how-to-implement-strategies.rst:327 +#, fuzzy msgid "" -":code:`aggregate_evaluate` is responsible for aggregating the results " +"``aggregate_evaluate`` is responsible for aggregating the results " "returned by the clients that were selected and asked to evaluate in " -":code:`configure_evaluate`." +"``configure_evaluate``." msgstr "" ":code:`aggregate_evaluate` est chargé d'agréger les résultats renvoyés " "par les clients qui ont été sélectionnés et à qui l'on a demandé " "d'évaluer dans :code:`configure_evaluate`." -#: ../../source/how-to-implement-strategies.rst:306 +#: ../../source/how-to-implement-strategies.rst:341 +#, fuzzy msgid "" "Of course, failures can happen, so there is no guarantee that the server " "will get results from all the clients it sent instructions to (via " -":code:`configure_evaluate`). :code:`aggregate_evaluate` therefore " -"receives a list of :code:`results`, but also a list of :code:`failures`." +"``configure_evaluate``). ``aggregate_evaluate`` therefore receives a list" +" of ``results``, but also a list of ``failures``." msgstr "" "Bien sûr, des échecs peuvent se produire, il n'y a donc aucune garantie " "que le serveur obtienne des résultats de tous les clients auxquels il a " @@ -7014,12 +7259,13 @@ msgstr "" ":code:`aggregate_evaluate` reçoit donc une liste de :code:`résultats`, " "mais aussi une liste d' :code:`échecs`." -#: ../../source/how-to-implement-strategies.rst:308 +#: ../../source/how-to-implement-strategies.rst:346 +#, fuzzy msgid "" -":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a" -" dictionary of aggregated metrics. 
The :code:`float` return value is " -"optional because :code:`aggregate_evaluate` might decide that the results" -" provided are not sufficient for aggregation (e.g., too many failures)." +"``aggregate_evaluate`` returns an optional ``float`` (loss) and a " +"dictionary of aggregated metrics. The ``float`` return value is optional " +"because ``aggregate_evaluate`` might decide that the results provided are" +" not sufficient for aggregation (e.g., too many failures)." msgstr "" ":code:`aggregate_evaluate` renvoie un :code:`float` facultatif (perte) et" " un dictionnaire de mesures agrégées. La valeur de retour :code:`float` " @@ -7027,28 +7273,31 @@ msgstr "" "résultats fournis ne sont pas suffisants pour l'agrégation (par exemple, " "trop d'échecs)." -#: ../../source/how-to-implement-strategies.rst:311 -msgid "The :code:`evaluate` method" +#: ../../source/how-to-implement-strategies.rst:352 +#, fuzzy +msgid "The ``evaluate`` method" msgstr "La méthode :code:`évaluer`" -#: ../../source/how-to-implement-strategies.rst:313 +#: ../../source/how-to-implement-strategies.rst:354 +#, fuzzy msgid "" -":code:`evaluate` is responsible for evaluating model parameters on the " -"server-side. Having :code:`evaluate` in addition to " -":code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies " -"to perform both servers-side and client-side (federated) evaluation." +"``evaluate`` is responsible for evaluating model parameters on the " +"server-side. Having ``evaluate`` in addition to " +"``configure_evaluate``/``aggregate_evaluate`` enables strategies to " +"perform both servers-side and client-side (federated) evaluation." msgstr "" "le fait d'avoir :code:`evaluate` en plus de " ":code:`configure_evaluate`/:code:`aggregate_evaluate` permet aux " "stratégies d'effectuer des évaluations à la fois côté serveur et côté " "client (fédéré)." 
-#: ../../source/how-to-implement-strategies.rst:323 +#: ../../source/how-to-implement-strategies.rst:364 +#, fuzzy msgid "" "The return value is again optional because the strategy might not need to" " implement server-side evaluation or because the user-defined " -":code:`evaluate` method might not complete successfully (e.g., it might " -"fail to load the server-side evaluation data)." +"``evaluate`` method might not complete successfully (e.g., it might fail " +"to load the server-side evaluation data)." msgstr "" "La valeur de retour est à nouveau facultative parce que la stratégie peut" " ne pas avoir besoin de mettre en œuvre l'évaluation côté serveur ou " @@ -7061,65 +7310,65 @@ msgstr "" msgid "Install Flower" msgstr "Installer Flower" -#: ../../source/how-to-install-flower.rst:6 +#: ../../source/how-to-install-flower.rst:5 #, fuzzy msgid "Python version" msgstr "Version Python" -#: ../../source/how-to-install-flower.rst:12 +#: ../../source/how-to-install-flower.rst:11 msgid "Install stable release" msgstr "Installe la version stable" -#: ../../source/how-to-install-flower.rst:15 -#: ../../source/how-to-upgrade-to-flower-next.rst:46 +#: ../../source/how-to-install-flower.rst:14 +#: ../../source/how-to-upgrade-to-flower-next.rst:66 msgid "Using pip" msgstr "" -#: ../../source/how-to-install-flower.rst:17 -msgid "" -"Stable releases are available on `PyPI " -"`_::" +#: ../../source/how-to-install-flower.rst:16 +#, fuzzy +msgid "Stable releases are available on `PyPI `_:" msgstr "" "Les versions stables sont disponibles sur `PyPI " "`_: :" -#: ../../source/how-to-install-flower.rst:21 +#: ../../source/how-to-install-flower.rst:22 +#, fuzzy msgid "" "For simulations that use the Virtual Client Engine, ``flwr`` should be " -"installed with the ``simulation`` extra::" +"installed with the ``simulation`` extra:" msgstr "" "Pour les simulations qui utilisent le moteur de client virtuel, ``flwr`` " "doit être installé avec l'option ``simulation``: :" -#: 
../../source/how-to-install-flower.rst:27 +#: ../../source/how-to-install-flower.rst:30 msgid "Using conda (or mamba)" msgstr "" -#: ../../source/how-to-install-flower.rst:29 +#: ../../source/how-to-install-flower.rst:32 msgid "Flower can also be installed from the ``conda-forge`` channel." msgstr "" -#: ../../source/how-to-install-flower.rst:31 +#: ../../source/how-to-install-flower.rst:34 msgid "" "If you have not added ``conda-forge`` to your channels, you will first " -"need to run the following::" +"need to run the following:" msgstr "" -#: ../../source/how-to-install-flower.rst:36 +#: ../../source/how-to-install-flower.rst:42 msgid "" "Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " -"installed with ``conda``::" +"installed with ``conda``:" msgstr "" -#: ../../source/how-to-install-flower.rst:40 -msgid "or with ``mamba``::" +#: ../../source/how-to-install-flower.rst:49 +msgid "or with ``mamba``:" msgstr "" -#: ../../source/how-to-install-flower.rst:46 +#: ../../source/how-to-install-flower.rst:56 msgid "Verify installation" msgstr "Vérifie l'installation" -#: ../../source/how-to-install-flower.rst:48 +#: ../../source/how-to-install-flower.rst:58 #, fuzzy msgid "" "The following command can be used to verify if Flower was successfully " @@ -7130,58 +7379,62 @@ msgstr "" "installé avec succès. 
Si tout a fonctionné, la version de Flower devrait " "être imprimée sur la ligne de commande: :" -#: ../../source/how-to-install-flower.rst:58 +#: ../../source/how-to-install-flower.rst:68 msgid "Advanced installation options" msgstr "Options d'installation avancées" -#: ../../source/how-to-install-flower.rst:61 +#: ../../source/how-to-install-flower.rst:71 #, fuzzy msgid "Install via Docker" msgstr "Installer Flower" -#: ../../source/how-to-install-flower.rst:63 +#: ../../source/how-to-install-flower.rst:73 msgid ":doc:`Run Flower using Docker `" msgstr "" -#: ../../source/how-to-install-flower.rst:66 +#: ../../source/how-to-install-flower.rst:76 msgid "Install pre-release" msgstr "Installer la version pre-release" -#: ../../source/how-to-install-flower.rst:68 +#: ../../source/how-to-install-flower.rst:78 +#, fuzzy msgid "" "New (possibly unstable) versions of Flower are sometimes available as " "pre-release versions (alpha, beta, release candidate) before the stable " -"release happens::" +"release happens:" msgstr "" "Les nouvelles versions (éventuellement instables) de Flower sont parfois " "disponibles en tant que versions préliminaires (alpha, bêta, release " "candidate) avant que la version stable n'arrive : :" -#: ../../source/how-to-install-flower.rst:72 +#: ../../source/how-to-install-flower.rst:85 +#, fuzzy msgid "" "For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" -" should be installed with the ``simulation`` extra::" +" should be installed with the ``simulation`` extra:" msgstr "" "Pour les simulations qui utilisent le moteur de client virtuel, les " "versions de ``flwr`` doivent être installées avec l'option " "``simulation``: :" -#: ../../source/how-to-install-flower.rst:77 +#: ../../source/how-to-install-flower.rst:93 msgid "Install nightly release" msgstr "Installer la version nightly" -#: ../../source/how-to-install-flower.rst:79 +#: ../../source/how-to-install-flower.rst:95 +#, fuzzy msgid "" "The latest (potentially 
unstable) changes in Flower are available as " -"nightly releases::" +"nightly releases:" msgstr "" "Les dernières modifications (potentiellement instables) de Flower sont " "disponibles sous forme de versions nocturnes: :" -#: ../../source/how-to-install-flower.rst:83 +#: ../../source/how-to-install-flower.rst:101 +#, fuzzy msgid "" "For simulations that use the Virtual Client Engine, ``flwr-nightly`` " -"should be installed with the ``simulation`` extra::" +"should be installed with the ``simulation`` extra:" msgstr "" "Pour les simulations qui utilisent le moteur de client virtuel, ``flwr-" "nightly`` doit être installé avec l'option ``simulation``: :" @@ -7206,7 +7459,7 @@ msgstr "" "sur la consommation des ressources peuvent t'aider à prendre des " "décisions plus intelligentes et à accélérer le temps d'exécution." -#: ../../source/how-to-monitor-simulation.rst:6 +#: ../../source/how-to-monitor-simulation.rst:9 msgid "" "The specific instructions assume you are using macOS and have the " "`Homebrew `_ package manager installed." @@ -7214,11 +7467,11 @@ msgstr "" "Les instructions spécifiques supposent que tu utilises macOS et que le " "gestionnaire de paquets `Homebrew `_ est installé." -#: ../../source/how-to-monitor-simulation.rst:10 +#: ../../source/how-to-monitor-simulation.rst:13 msgid "Downloads" msgstr "Téléchargements" -#: ../../source/how-to-monitor-simulation.rst:16 +#: ../../source/how-to-monitor-simulation.rst:19 msgid "" "`Prometheus `_ is used for data collection, while" " `Grafana `_ will enable you to visualize the " @@ -7230,7 +7483,7 @@ msgstr "" "visualiser les données collectées. Ils sont tous deux bien intégrés à " "`Ray `_ que Flower utilise sous le capot." -#: ../../source/how-to-monitor-simulation.rst:18 +#: ../../source/how-to-monitor-simulation.rst:23 msgid "" "Overwrite the configuration files (depending on your device, it might be " "installed on a different path)." 
@@ -7238,17 +7491,17 @@ msgstr "" "Écrase les fichiers de configuration (selon ton appareil, il se peut " "qu'il soit installé sur un chemin différent)." -#: ../../source/how-to-monitor-simulation.rst:20 +#: ../../source/how-to-monitor-simulation.rst:26 msgid "If you are on an M1 Mac, it should be:" msgstr "Si tu es sur un Mac M1, il devrait l'être :" -#: ../../source/how-to-monitor-simulation.rst:27 +#: ../../source/how-to-monitor-simulation.rst:33 msgid "On the previous generation Intel Mac devices, it should be:" msgstr "" "Sur les appareils Mac Intel de la génération précédente, ce devrait être " "le cas :" -#: ../../source/how-to-monitor-simulation.rst:34 +#: ../../source/how-to-monitor-simulation.rst:40 msgid "" "Open the respective configuration files and change them. Depending on " "your device, use one of the two following commands:" @@ -7256,7 +7509,7 @@ msgstr "" "Ouvre les fichiers de configuration respectifs et modifie-les. Selon ton " "appareil, utilise l'une des deux commandes suivantes :" -#: ../../source/how-to-monitor-simulation.rst:44 +#: ../../source/how-to-monitor-simulation.rst:51 msgid "" "and then delete all the text in the file and paste a new Prometheus " "config you see below. You may adjust the time intervals to your " @@ -7266,7 +7519,7 @@ msgstr "" "configuration Prometheus que tu vois ci-dessous. Tu peux adapter les " "intervalles de temps à tes besoins :" -#: ../../source/how-to-monitor-simulation.rst:59 +#: ../../source/how-to-monitor-simulation.rst:67 msgid "" "Now after you have edited the Prometheus configuration, do the same with " "the Grafana configuration files. Open those using one of the following " @@ -7276,7 +7529,7 @@ msgstr "" "même avec les fichiers de configuration de Grafana. 
Ouvre ces derniers à " "l'aide de l'une des commandes suivantes, comme précédemment :" -#: ../../source/how-to-monitor-simulation.rst:69 +#: ../../source/how-to-monitor-simulation.rst:78 msgid "" "Your terminal editor should open and allow you to apply the following " "configuration as before." @@ -7284,7 +7537,7 @@ msgstr "" "Ton éditeur de terminal devrait s'ouvrir et te permettre d'appliquer la " "configuration suivante comme précédemment." -#: ../../source/how-to-monitor-simulation.rst:84 +#: ../../source/how-to-monitor-simulation.rst:94 msgid "" "Congratulations, you just downloaded all the necessary software needed " "for metrics tracking. Now, let’s start it." @@ -7292,11 +7545,11 @@ msgstr "" "Félicitations, tu viens de télécharger tous les logiciels nécessaires au " "suivi des métriques, maintenant, démarrons-le." -#: ../../source/how-to-monitor-simulation.rst:88 +#: ../../source/how-to-monitor-simulation.rst:98 msgid "Tracking metrics" msgstr "Suivi des mesures" -#: ../../source/how-to-monitor-simulation.rst:90 +#: ../../source/how-to-monitor-simulation.rst:100 msgid "" "Before running your Flower simulation, you have to start the monitoring " "tools you have just installed and configured." @@ -7304,7 +7557,7 @@ msgstr "" "Avant de lancer ta simulation Flower, tu dois démarrer les outils de " "surveillance que tu viens d'installer et de configurer." -#: ../../source/how-to-monitor-simulation.rst:97 +#: ../../source/how-to-monitor-simulation.rst:108 msgid "" "Please include the following argument in your Python code when starting a" " simulation." @@ -7312,11 +7565,11 @@ msgstr "" "Tu dois inclure l'argument suivant dans ton code Python lorsque tu " "démarres une simulation." -#: ../../source/how-to-monitor-simulation.rst:108 +#: ../../source/how-to-monitor-simulation.rst:119 msgid "Now, you are ready to start your workload." msgstr "Maintenant, tu es prêt à commencer ta charge de travail." 
-#: ../../source/how-to-monitor-simulation.rst:110 +#: ../../source/how-to-monitor-simulation.rst:121 msgid "" "Shortly after the simulation starts, you should see the following logs in" " your terminal:" @@ -7324,11 +7577,12 @@ msgstr "" "Peu de temps après le début de la simulation, tu devrais voir les " "journaux suivants dans ton terminal :" -#: ../../source/how-to-monitor-simulation.rst:117 -msgid "You can look at everything at ``_ ." +#: ../../source/how-to-monitor-simulation.rst:127 +#, fuzzy +msgid "You can look at everything at http://127.0.0.1:8265 ." msgstr "Tu peux tout regarder sur ``_ ." -#: ../../source/how-to-monitor-simulation.rst:119 +#: ../../source/how-to-monitor-simulation.rst:129 msgid "" "It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" " lowest option)." @@ -7336,7 +7590,7 @@ msgstr "" "Il s'agit d'un tableau de bord Ray. Tu peux naviguer vers Metrics (sur le" " panneau de gauche, l'option la plus basse)." -#: ../../source/how-to-monitor-simulation.rst:121 +#: ../../source/how-to-monitor-simulation.rst:132 msgid "" "Or alternatively, you can just see them in Grafana by clicking on the " "right-up corner, “View in Grafana”. Please note that the Ray dashboard is" @@ -7350,21 +7604,22 @@ msgstr "" "terminée, tu ne peux utiliser Grafana que pour explorer les métriques. Tu" " peux démarrer Grafana en te rendant sur `http://localhost:3000/``." -#: ../../source/how-to-monitor-simulation.rst:123 +#: ../../source/how-to-monitor-simulation.rst:137 +#, fuzzy msgid "" "After you finish the visualization, stop Prometheus and Grafana. This is " -"important as they will otherwise block, for example port :code:`3000` on " -"your machine as long as they are running." +"important as they will otherwise block, for example port ``3000`` on your" +" machine as long as they are running." msgstr "" "Après avoir terminé la visualisation, arrête Prometheus et Grafana. 
C'est" " important car sinon ils bloqueront, par exemple, le port :code:`3000` " "sur ta machine tant qu'ils seront en cours d'exécution." -#: ../../source/how-to-monitor-simulation.rst:132 +#: ../../source/how-to-monitor-simulation.rst:147 msgid "Resource allocation" msgstr "Allocation des ressources" -#: ../../source/how-to-monitor-simulation.rst:134 +#: ../../source/how-to-monitor-simulation.rst:149 msgid "" "You must understand how the Ray library works to efficiently allocate " "system resources to simulation clients on your own." @@ -7373,7 +7628,7 @@ msgstr "" "efficacement les ressources du système aux clients de simulation de ton " "côté." -#: ../../source/how-to-monitor-simulation.rst:136 +#: ../../source/how-to-monitor-simulation.rst:152 msgid "" "Initially, the simulation (which Ray handles under the hood) starts by " "default with all the available resources on the system, which it shares " @@ -7390,11 +7645,11 @@ msgstr "" "ce blog. Tu peux vérifier les ressources du système en exécutant ce qui " "suit :" -#: ../../source/how-to-monitor-simulation.rst:143 +#: ../../source/how-to-monitor-simulation.rst:164 msgid "In Google Colab, the result you see might be similar to this:" msgstr "Dans Google Colab, le résultat que tu obtiens peut ressembler à ceci :" -#: ../../source/how-to-monitor-simulation.rst:155 +#: ../../source/how-to-monitor-simulation.rst:175 msgid "" "However, you can overwrite the defaults. When starting a simulation, do " "the following (you don't need to overwrite all of them):" @@ -7403,11 +7658,11 @@ msgstr "" "une simulation, fais ce qui suit (tu n'as pas besoin de les écraser " "toutes) :" -#: ../../source/how-to-monitor-simulation.rst:175 +#: ../../source/how-to-monitor-simulation.rst:195 msgid "Let’s also specify the resource for a single client." msgstr "Spécifions également la ressource pour un seul client." 
-#: ../../source/how-to-monitor-simulation.rst:205 +#: ../../source/how-to-monitor-simulation.rst:225 msgid "" "Now comes the crucial part. Ray will start a new client only when it has " "all the required resources (such that they run in parallel) when the " @@ -7417,14 +7672,15 @@ msgstr "" "ressources nécessaires (de manière à ce qu'ils fonctionnent en parallèle)" " lorsque les ressources le permettront." -#: ../../source/how-to-monitor-simulation.rst:207 +#: ../../source/how-to-monitor-simulation.rst:228 +#, fuzzy msgid "" "In the example above, only one client will be run, so your clients won't " -"run concurrently. Setting :code:`client_num_gpus = 0.5` would allow " -"running two clients and therefore enable them to run concurrently. Be " -"careful not to require more resources than available. If you specified " -":code:`client_num_gpus = 2`, the simulation wouldn't start (even if you " -"had 2 GPUs but decided to set 1 in :code:`ray_init_args`)." +"run concurrently. Setting ``client_num_gpus = 0.5`` would allow running " +"two clients and therefore enable them to run concurrently. Be careful not" +" to require more resources than available. If you specified " +"``client_num_gpus = 2``, the simulation wouldn't start (even if you had 2" +" GPUs but decided to set 1 in ``ray_init_args``)." msgstr "" "Dans l'exemple ci-dessus, un seul client sera exécuté, donc tes clients " "ne fonctionneront pas simultanément. En définissant " @@ -7435,15 +7691,15 @@ msgstr "" "as 2 GPU mais que tu as décidé d'en définir 1 dans " ":code:`ray_init_args`)." -#: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 +#: ../../source/how-to-monitor-simulation.rst:235 ../../source/ref-faq.rst:2 msgid "FAQ" msgstr "FAQ" -#: ../../source/how-to-monitor-simulation.rst:214 +#: ../../source/how-to-monitor-simulation.rst:237 msgid "Q: I don't see any metrics logged." msgstr "Q : Je ne vois aucune mesure enregistrée." 
-#: ../../source/how-to-monitor-simulation.rst:216 +#: ../../source/how-to-monitor-simulation.rst:239 msgid "" "A: The timeframe might not be properly set. The setting is in the top " "right corner (\"Last 30 minutes\" by default). Please change the " @@ -7454,7 +7710,7 @@ msgstr "" "défaut). Modifie le délai pour qu'il corresponde à la période pendant " "laquelle la simulation s'est déroulée." -#: ../../source/how-to-monitor-simulation.rst:218 +#: ../../source/how-to-monitor-simulation.rst:243 msgid "" "Q: I see “Grafana server not detected. Please make sure the Grafana " "server is running and refresh this page” after going to the Metrics tab " @@ -7464,7 +7720,7 @@ msgstr "" "serveur Grafana fonctionne et actualise cette page\" après avoir accédé à" " l'onglet Métriques dans Ray Dashboard." -#: ../../source/how-to-monitor-simulation.rst:220 +#: ../../source/how-to-monitor-simulation.rst:246 msgid "" "A: You probably don't have Grafana running. Please check the running " "services" @@ -7472,15 +7728,16 @@ msgstr "" "R : Grafana n'est probablement pas en cours d'exécution. Vérifie les " "services en cours d'exécution" -#: ../../source/how-to-monitor-simulation.rst:226 +#: ../../source/how-to-monitor-simulation.rst:252 +#, fuzzy msgid "" "Q: I see \"This site can't be reached\" when going to " -"``_." +"http://127.0.0.1:8265." msgstr "" "Q : Je vois \"This site can't be reached\" quand je vais sur " "``_." -#: ../../source/how-to-monitor-simulation.rst:228 +#: ../../source/how-to-monitor-simulation.rst:254 msgid "" "A: Either the simulation has already finished, or you still need to start" " Prometheus." @@ -7488,22 +7745,22 @@ msgstr "" "R : Soit la simulation est déjà terminée, soit tu dois encore démarrer " "Prometheus." 
-#: ../../source/how-to-monitor-simulation.rst:232 +#: ../../source/how-to-monitor-simulation.rst:257 msgid "Resources" msgstr "Ressources" -#: ../../source/how-to-monitor-simulation.rst:234 +#: ../../source/how-to-monitor-simulation.rst:259 #, fuzzy msgid "" -"Ray Dashboard: ``_" +"Ray Dashboard: https://docs.ray.io/en/latest/ray-observability/getting-" +"started.html" msgstr "" "Tableau de bord Ray : ``_" -#: ../../source/how-to-monitor-simulation.rst:236 +#: ../../source/how-to-monitor-simulation.rst:261 #, fuzzy -msgid "Ray Metrics: ``_" +msgid "Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html" msgstr "" "Ray Metrics : ``_" @@ -7529,19 +7786,19 @@ msgid "" "VCE." msgstr "" -#: ../../source/how-to-run-simulations.rst:10 +#: ../../source/how-to-run-simulations.rst:19 msgid "" -"The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" -" clients. These clients are identical to `non-virtual` clients (i.e. the " +"The ``VirtualClientEngine`` schedules, launches and manages `virtual` " +"clients. These clients are identical to `non-virtual` clients (i.e. the " "ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " "creating a class inheriting, for example, from `flwr.client.NumPyClient " "`_ and therefore behave in an " "identical way. In addition to that, clients managed by the " -":code:`VirtualClientEngine` are:" +"``VirtualClientEngine`` are:" msgstr "" -#: ../../source/how-to-run-simulations.rst:12 +#: ../../source/how-to-run-simulations.rst:26 msgid "" "resource-aware: this means that each client gets assigned a portion of " "the compute and memory on your system. You as a user can control this at " @@ -7550,14 +7807,14 @@ msgid "" "client, the more clients can run concurrently on the same hardware." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:13 +#: ../../source/how-to-run-simulations.rst:31 msgid "" "self-managed: this means that you as a user do not need to launch clients" -" manually, instead this gets delegated to :code:`VirtualClientEngine`'s " +" manually, instead this gets delegated to ``VirtualClientEngine``'s " "internals." msgstr "" -#: ../../source/how-to-run-simulations.rst:14 +#: ../../source/how-to-run-simulations.rst:33 msgid "" "ephemeral: this means that a client is only materialized when it is " "required in the FL process (e.g. to do `fit() `_, an open-source framework for scalable Python " -"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use " -"of `Actors `_ to " -"spawn `virtual` clients and run their workload." +"workloads. In particular, Flower's ``VirtualClientEngine`` makes use of " +"`Actors `_ to spawn " +"`virtual` clients and run their workload." msgstr "" -#: ../../source/how-to-run-simulations.rst:20 +#: ../../source/how-to-run-simulations.rst:45 msgid "Launch your Flower simulation" msgstr "" -#: ../../source/how-to-run-simulations.rst:22 +#: ../../source/how-to-run-simulations.rst:47 msgid "" "Running Flower simulations still require you to define your client class," " a strategy, and utility functions to download and load (and potentially " @@ -7589,37 +7846,37 @@ msgid "" " as follows:" msgstr "" -#: ../../source/how-to-run-simulations.rst:44 +#: ../../source/how-to-run-simulations.rst:73 #, fuzzy msgid "VirtualClientEngine resources" msgstr "Moteur de client virtuel" -#: ../../source/how-to-run-simulations.rst:45 +#: ../../source/how-to-run-simulations.rst:75 msgid "" "By default the VCE has access to all system resources (i.e. all CPUs, all" " GPUs, etc) since that is also the default behavior when starting Ray. " "However, in some settings you might want to limit how many of your system" " resources are used for simulation. 
You can do this via the " -":code:`ray_init_args` input argument to :code:`start_simulation` which " -"the VCE internally passes to Ray's :code:`ray.init` command. For a " -"complete list of settings you can configure check the `ray.init " +"``ray_init_args`` input argument to ``start_simulation`` which the VCE " +"internally passes to Ray's ``ray.init`` command. For a complete list of " +"settings you can configure check the `ray.init " "`_" -" documentation. Do not set :code:`ray_init_args` if you want the VCE to " -"use all your system's CPUs and GPUs." +" documentation. Do not set ``ray_init_args`` if you want the VCE to use " +"all your system's CPUs and GPUs." msgstr "" -#: ../../source/how-to-run-simulations.rst:62 +#: ../../source/how-to-run-simulations.rst:97 msgid "Assigning client resources" msgstr "" -#: ../../source/how-to-run-simulations.rst:63 +#: ../../source/how-to-run-simulations.rst:99 msgid "" -"By default the :code:`VirtualClientEngine` assigns a single CPU core (and" -" nothing else) to each virtual client. This means that if your system has" -" 10 cores, that many virtual clients can be concurrently running." +"By default the ``VirtualClientEngine`` assigns a single CPU core (and " +"nothing else) to each virtual client. This means that if your system has " +"10 cores, that many virtual clients can be concurrently running." msgstr "" -#: ../../source/how-to-run-simulations.rst:65 +#: ../../source/how-to-run-simulations.rst:103 msgid "" "More often than not, you would probably like to adjust the resources your" " clients get assigned based on the complexity (i.e. compute and memory " @@ -7630,34 +7887,32 @@ msgid "" "our case Flower clients):" msgstr "" -#: ../../source/how-to-run-simulations.rst:67 -msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." +#: ../../source/how-to-run-simulations.rst:110 +msgid "``num_cpus`` indicates the number of CPU cores a client would get." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:68 -msgid "" -":code:`num_gpus` indicates the **ratio** of GPU memory a client gets " -"assigned." +#: ../../source/how-to-run-simulations.rst:111 +msgid "``num_gpus`` indicates the **ratio** of GPU memory a client gets assigned." msgstr "" -#: ../../source/how-to-run-simulations.rst:70 +#: ../../source/how-to-run-simulations.rst:113 msgid "Let's see a few examples:" msgstr "" -#: ../../source/how-to-run-simulations.rst:89 +#: ../../source/how-to-run-simulations.rst:132 msgid "" -"While the :code:`client_resources` can be used to control the degree of " +"While the ``client_resources`` can be used to control the degree of " "concurrency in your FL simulation, this does not stop you from running " "dozens, hundreds or even thousands of clients in the same round and " "having orders of magnitude more `dormant` (i.e. not participating in a " "round) clients. Let's say you want to have 100 clients per round but your" " system can only accommodate 8 clients concurrently. The " -":code:`VirtualClientEngine` will schedule 100 jobs to run (each " -"simulating a client sampled by the strategy) and then will execute them " -"in a resource-aware manner in batches of 8." +"``VirtualClientEngine`` will schedule 100 jobs to run (each simulating a " +"client sampled by the strategy) and then will execute them in a resource-" +"aware manner in batches of 8." msgstr "" -#: ../../source/how-to-run-simulations.rst:91 +#: ../../source/how-to-run-simulations.rst:140 msgid "" "To understand all the intricate details on how resources are used to " "schedule FL clients and how to define custom resources, please take a " @@ -7665,19 +7920,19 @@ msgid "" "core/scheduling/resources.html>`_." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:94 +#: ../../source/how-to-run-simulations.rst:145 #, fuzzy msgid "Simulation examples" msgstr "Exemples de PyTorch" -#: ../../source/how-to-run-simulations.rst:96 +#: ../../source/how-to-run-simulations.rst:147 msgid "" "A few ready-to-run complete examples for Flower simulation in " "Tensorflow/Keras and PyTorch are provided in the `Flower repository " "`_. You can run them on Google Colab too:" msgstr "" -#: ../../source/how-to-run-simulations.rst:98 +#: ../../source/how-to-run-simulations.rst:151 #, fuzzy msgid "" "`Tensorflow/Keras Simulation " @@ -7688,114 +7943,112 @@ msgstr "" "`_" -#: ../../source/how-to-run-simulations.rst:99 +#: ../../source/how-to-run-simulations.rst:154 msgid "" "`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " "MNIST." msgstr "" -#: ../../source/how-to-run-simulations.rst:104 +#: ../../source/how-to-run-simulations.rst:159 #, fuzzy msgid "Multi-node Flower simulations" msgstr "Simulation de moniteur" -#: ../../source/how-to-run-simulations.rst:106 +#: ../../source/how-to-run-simulations.rst:161 msgid "" -"Flower's :code:`VirtualClientEngine` allows you to run FL simulations " -"across multiple compute nodes. Before starting your multi-node simulation" -" ensure that you:" +"Flower's ``VirtualClientEngine`` allows you to run FL simulations across " +"multiple compute nodes. Before starting your multi-node simulation ensure" +" that you:" msgstr "" -#: ../../source/how-to-run-simulations.rst:108 +#: ../../source/how-to-run-simulations.rst:164 msgid "Have the same Python environment in all nodes." msgstr "" -#: ../../source/how-to-run-simulations.rst:109 +#: ../../source/how-to-run-simulations.rst:165 msgid "Have a copy of your code (e.g. your entire repo) in all nodes." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:110 +#: ../../source/how-to-run-simulations.rst:166 msgid "" "Have a copy of your dataset in all nodes (more about this in " ":ref:`simulation considerations `)" msgstr "" -#: ../../source/how-to-run-simulations.rst:111 +#: ../../source/how-to-run-simulations.rst:168 msgid "" -"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation " -"`_ so the " -":code:`VirtualClientEngine` attaches to a running Ray instance." +"Pass ``ray_init_args={\"address\"=\"auto\"}`` to `start_simulation `_ so the " +"``VirtualClientEngine`` attaches to a running Ray instance." msgstr "" -#: ../../source/how-to-run-simulations.rst:112 +#: ../../source/how-to-run-simulations.rst:171 msgid "" -"Start Ray on you head node: on the terminal type :code:`ray start " -"--head`. This command will print a few lines, one of which indicates how " -"to attach other nodes to the head node." +"Start Ray on you head node: on the terminal type ``ray start --head``. " +"This command will print a few lines, one of which indicates how to attach" +" other nodes to the head node." msgstr "" -#: ../../source/how-to-run-simulations.rst:113 +#: ../../source/how-to-run-simulations.rst:174 msgid "" "Attach other nodes to the head node: copy the command shown after " "starting the head and execute it on terminal of a new node: for example " -":code:`ray start --address='192.168.1.132:6379'`" +"``ray start --address='192.168.1.132:6379'``" msgstr "" -#: ../../source/how-to-run-simulations.rst:115 +#: ../../source/how-to-run-simulations.rst:178 msgid "" "With all the above done, you can run your code from the head node as you " "would if the simulation was running on a single node." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:117 +#: ../../source/how-to-run-simulations.rst:181 msgid "" "Once your simulation is finished, if you'd like to dismantle your cluster" -" you simply need to run the command :code:`ray stop` in each node's " -"terminal (including the head node)." +" you simply need to run the command ``ray stop`` in each node's terminal " +"(including the head node)." msgstr "" -#: ../../source/how-to-run-simulations.rst:120 +#: ../../source/how-to-run-simulations.rst:185 msgid "Multi-node simulation good-to-know" msgstr "" -#: ../../source/how-to-run-simulations.rst:122 +#: ../../source/how-to-run-simulations.rst:187 msgid "" "Here we list a few interesting functionality when running multi-node FL " "simulations:" msgstr "" -#: ../../source/how-to-run-simulations.rst:124 +#: ../../source/how-to-run-simulations.rst:189 msgid "" -"User :code:`ray status` to check all nodes connected to your head node as" -" well as the total resources available to the " -":code:`VirtualClientEngine`." +"User ``ray status`` to check all nodes connected to your head node as " +"well as the total resources available to the ``VirtualClientEngine``." msgstr "" -#: ../../source/how-to-run-simulations.rst:126 +#: ../../source/how-to-run-simulations.rst:192 msgid "" "When attaching a new node to the head, all its resources (i.e. all CPUs, " "all GPUs) will be visible by the head node. This means that the " -":code:`VirtualClientEngine` can schedule as many `virtual` clients as " -"that node can possible run. In some settings you might want to exclude " -"certain resources from the simulation. You can do this by appending " -"`--num-cpus=` and/or `--num-" -"gpus=` in any :code:`ray start` command (including " -"when starting the head)" +"``VirtualClientEngine`` can schedule as many `virtual` clients as that " +"node can possible run. In some settings you might want to exclude certain" +" resources from the simulation. 
You can do this by appending `--num-" +"cpus=` and/or `--num-gpus=` in " +"any ``ray start`` command (including when starting the head)" msgstr "" -#: ../../source/how-to-run-simulations.rst:132 +#: ../../source/how-to-run-simulations.rst:202 #, fuzzy msgid "Considerations for simulations" msgstr "Simulation de moniteur" -#: ../../source/how-to-run-simulations.rst:135 +#: ../../source/how-to-run-simulations.rst:206 msgid "" "We are actively working on these fronts so to make it trivial to run any " "FL workload with Flower simulation." msgstr "" -#: ../../source/how-to-run-simulations.rst:138 +#: ../../source/how-to-run-simulations.rst:209 msgid "" "The current VCE allows you to run Federated Learning workloads in " "simulation mode whether you are prototyping simple scenarios on your " @@ -7806,61 +8059,60 @@ msgid "" "couple of current limitations in our implementation." msgstr "" -#: ../../source/how-to-run-simulations.rst:141 +#: ../../source/how-to-run-simulations.rst:217 #, fuzzy msgid "GPU resources" msgstr "Ressources" -#: ../../source/how-to-run-simulations.rst:143 +#: ../../source/how-to-run-simulations.rst:219 msgid "" "The VCE assigns a share of GPU memory to a client that specifies the key " -":code:`num_gpus` in :code:`client_resources`. This being said, Ray (used " +"``num_gpus`` in ``client_resources``. This being said, Ray (used " "internally by the VCE) is by default:" msgstr "" -#: ../../source/how-to-run-simulations.rst:146 +#: ../../source/how-to-run-simulations.rst:222 msgid "" "not aware of the total VRAM available on the GPUs. This means that if you" -" set :code:`num_gpus=0.5` and you have two GPUs in your system with " -"different (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients" -" concurrently." +" set ``num_gpus=0.5`` and you have two GPUs in your system with different" +" (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients " +"concurrently." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:147 +#: ../../source/how-to-run-simulations.rst:225 msgid "" "not aware of other unrelated (i.e. not created by the VCE) workloads are " "running on the GPU. Two takeaways from this are:" msgstr "" -#: ../../source/how-to-run-simulations.rst:149 +#: ../../source/how-to-run-simulations.rst:228 msgid "" "Your Flower server might need a GPU to evaluate the `global model` after " "aggregation (by instance when making use of the `evaluate method `_)" msgstr "" -#: ../../source/how-to-run-simulations.rst:150 +#: ../../source/how-to-run-simulations.rst:231 msgid "" "If you want to run several independent Flower simulations on the same " "machine you need to mask-out your GPUs with " -":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching your " -"experiment." +"``CUDA_VISIBLE_DEVICES=\"\"`` when launching your experiment." msgstr "" -#: ../../source/how-to-run-simulations.rst:153 +#: ../../source/how-to-run-simulations.rst:235 msgid "" -"In addition, the GPU resource limits passed to :code:`client_resources` " -"are not `enforced` (i.e. they can be exceeded) which can result in the " +"In addition, the GPU resource limits passed to ``client_resources`` are " +"not `enforced` (i.e. they can be exceeded) which can result in the " "situation of client using more VRAM than the ratio specified when " "starting the simulation." msgstr "" -#: ../../source/how-to-run-simulations.rst:156 +#: ../../source/how-to-run-simulations.rst:240 #, fuzzy msgid "TensorFlow with GPUs" msgstr "Exemples de TensorFlow" -#: ../../source/how-to-run-simulations.rst:158 +#: ../../source/how-to-run-simulations.rst:242 msgid "" "When `using a GPU with TensorFlow " "`_ nearly your entire GPU memory of" @@ -7872,17 +8124,17 @@ msgid "" "`_." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:160 +#: ../../source/how-to-run-simulations.rst:249 msgid "" "This would need to be done in the main process (which is where the server" " would run) and in each Actor created by the VCE. By means of " -":code:`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` " -"in order to specify a function to be executed upon actor initialization. " -"In this case, to enable GPU growth for TF workloads. It would look as " +"``actor_kwargs`` we can pass the reserved key `\"on_actor_init_fn\"` in " +"order to specify a function to be executed upon actor initialization. In " +"this case, to enable GPU growth for TF workloads. It would look as " "follows:" msgstr "" -#: ../../source/how-to-run-simulations.rst:179 +#: ../../source/how-to-run-simulations.rst:272 #, fuzzy msgid "" "This is precisely the mechanism used in `Tensorflow/Keras Simulation " @@ -7893,11 +8145,11 @@ msgstr "" "`_" -#: ../../source/how-to-run-simulations.rst:183 +#: ../../source/how-to-run-simulations.rst:276 msgid "Multi-node setups" msgstr "" -#: ../../source/how-to-run-simulations.rst:185 +#: ../../source/how-to-run-simulations.rst:278 msgid "" "The VCE does not currently offer a way to control on which node a " "particular `virtual` client is executed. In other words, if more than a " @@ -7910,7 +8162,7 @@ msgid "" "circumvent data duplication." msgstr "" -#: ../../source/how-to-run-simulations.rst:187 +#: ../../source/how-to-run-simulations.rst:286 msgid "" "By definition virtual clients are `stateless` due to their ephemeral " "nature. A client state can be implemented as part of the Flower client " @@ -7939,17 +8191,17 @@ msgid "Model checkpointing" msgstr "Point de contrôle du modèle" #: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +#, fuzzy msgid "" "Model updates can be persisted on the server-side by customizing " -":code:`Strategy` methods. 
Implementing custom strategies is always an " -"option, but for many cases it may be more convenient to simply customize " -"an existing strategy. The following code example defines a new " -":code:`SaveModelStrategy` which customized the existing built-in " -":code:`FedAvg` strategy. In particular, it customizes " -":code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class " -"(:code:`FedAvg`). It then continues to save returned (aggregated) weights" -" before it returns those aggregated weights to the caller (i.e., the " -"server):" +"``Strategy`` methods. Implementing custom strategies is always an option," +" but for many cases it may be more convenient to simply customize an " +"existing strategy. The following code example defines a new " +"``SaveModelStrategy`` which customized the existing built-in ``FedAvg`` " +"strategy. In particular, it customizes ``aggregate_fit`` by calling " +"``aggregate_fit`` in the base class (``FedAvg``). It then continues to " +"save returned (aggregated) weights before it returns those aggregated " +"weights to the caller (i.e., the server):" msgstr "" "Les mises à jour du modèle peuvent être conservées côté serveur en " "personnalisant les méthodes :code:`Strategy`. L'implémentation de " @@ -7963,12 +8215,12 @@ msgstr "" "retournés (agrégés) avant de renvoyer ces poids agrégés à l'appelant " "(c'est-à-dire le serveur) :" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:47 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:53 #, fuzzy msgid "Save and load PyTorch checkpoints" msgstr "Sauvegarde et chargement des points de contrôle PyTorch" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:49 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:55 #, fuzzy msgid "" "Similar to the previous example but with a few extra steps, we'll show " @@ -7986,7 +8238,7 @@ msgstr "" "transformés en ``state_dict`` PyTorch en suivant la structure de la " "classe ``OrderedDict``." 
-#: ../../source/how-to-save-and-load-model-checkpoints.rst:85 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:98 msgid "" "To load your progress, you simply append the following lines to your " "code. Note that this will iterate over all saved checkpoints and load the" @@ -7996,7 +8248,7 @@ msgstr "" "à ton code. Note que cela va itérer sur tous les points de contrôle " "sauvegardés et charger le plus récent :" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:97 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:111 msgid "" "Return/use this object of type ``Parameters`` wherever necessary, such as" " in the ``initial_parameters`` when defining a ``Strategy``." @@ -8019,12 +8271,12 @@ msgstr "" "changements qui nécessitent de modifier le code des projets de la série " "0.x existants." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:8 -#: ../../source/how-to-upgrade-to-flower-next.rst:43 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-upgrade-to-flower-next.rst:63 msgid "Install update" msgstr "Installer la mise à jour" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 msgid "" "Here's how to update an existing installation to Flower 1.0 using either " "pip or Poetry:" @@ -8032,11 +8284,11 @@ msgstr "" "Voici comment mettre à jour une installation existante vers Flower 1.0 en" " utilisant soit pip soit Poetry :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 msgid "pip: add ``-U`` when installing." msgstr "pip : ajoute ``-U`` lors de l'installation." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:16 msgid "" "``python -m pip install -U flwr`` (when using ``start_server`` and " "``start_client``)" @@ -8044,7 +8296,7 @@ msgstr "" "``python -m pip install -U flwr`` (lors de l'utilisation de " "``start_server`` et ``start_client``)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:15 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 msgid "" "``python -m pip install -U 'flwr[simulation]'`` (when using " "``start_simulation``)" @@ -8052,7 +8304,7 @@ msgstr "" "``python -m pip install -U 'flwr[simulation]'`` (lors de l'utilisation de" " ``start_simulation``)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 msgid "" "Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " "reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " @@ -8062,13 +8314,13 @@ msgstr "" "puis réinstallez (n'oubliez pas de supprimer ``poetry.lock`` via ``rm " "poetry.lock`` avant d'exécuter ``poetry install``)." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:23 msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" msgstr "" "``flwr = \"^1.0.0\"`` (lors de l'utilisation de ``start_server`` et " "``start_client``)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:20 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 msgid "" "``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " "using ``start_simulation``)" @@ -8076,22 +8328,22 @@ msgstr "" "``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (lors de " "l'utilisation de ``start_simulation``)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 -#: ../../source/how-to-upgrade-to-flower-next.rst:100 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:28 +#: ../../source/how-to-upgrade-to-flower-next.rst:120 msgid "Required changes" msgstr "Changements nécessaires" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:30 msgid "The following breaking changes require manual updates." msgstr "" "Les changements de rupture suivants nécessitent des mises à jour " "manuelles." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:29 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 msgid "General" msgstr "Généralités" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:31 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:35 msgid "" "Pass all arguments as keyword arguments (not as positional arguments). " "Here's an example:" @@ -8099,7 +8351,7 @@ msgstr "" "Passe tous les arguments comme des arguments de mots-clés (et non comme " "des arguments de position). 
Voici un exemple :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:38 msgid "" "Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " "FlowerClient())``" msgstr "" "Flower 0.19 (arguments positionnels) : ``start_client(\"127.0.0.1:8080\"," " FlowerClient())``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:34 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 msgid "" "Flower 1.0 (keyword arguments): " "``start_client(server_address=\"127.0.0.1:8080\", " @@ -8117,12 +8369,12 @@ msgstr "" "``start_client(server_address=\"127.0.0.1:8080\", " "client=FlowerClient())``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 #: ../../source/ref-api/flwr.client.Client.rst:2 msgid "Client" msgstr "Client" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 msgid "" "Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " "``def get_parameters(self, config):``" msgstr "" "Sous-classes de ``NumPyClient`` : changez ``def get_parameters(self):`` " "en ``def get_parameters(self, config):``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:40 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 msgid "" "Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " "get_parameters(self, ins: GetParametersIns):``" msgstr "" "Sous-classes de ``Client`` : changez ``def get_parameters(self):`` en " "``def get_parameters(self, ins : GetParametersIns):``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 msgid "Strategies / ``start_server`` / ``start_simulation``" msgstr "Stratégies / ``start_server`` / ``start_simulation``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:53 msgid "" 
"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " "``start_simulation``. Here's an example:" @@ -8150,7 +8402,7 @@ msgstr "" "Passez ``ServerConfig`` (au lieu d'un dictionnaire) à ``start_server`` et" " ``start_simulation``. Voici un exemple :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:56 msgid "" "Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " "\"round_timeout\": 600.0}, ...)``" @@ -8158,7 +8410,7 @@ msgstr "" "Flower 0.19 : ``start_server(..., config={\"num_rounds\" : 3, " "\"round_timeout\" : 600.0}, ...)``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 msgid "" "Flower 1.0: ``start_server(..., " "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " @@ -8168,7 +8420,7 @@ msgstr "" "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:50 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 msgid "" "Replace ``num_rounds=1`` in ``start_simulation`` with the new " "``config=ServerConfig(...)`` (see previous item)" @@ -8176,7 +8428,7 @@ msgstr "" "Remplacer ``num_rounds=1`` dans ``start_simulation`` par le nouveau " "``config=ServerConfig(...)`` (voir point précédent)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:63 msgid "" "Remove ``force_final_distributed_eval`` parameter from calls to " "``start_server``. Distributed evaluation on all clients can be enabled by" @@ -8188,19 +8440,19 @@ msgstr "" "activée en configurant la stratégie pour échantillonner tous les clients " "pour l'évaluation après le dernier tour de formation." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:66 msgid "Rename parameter/ndarray conversion functions:" msgstr "Renomme les fonctions de conversion des paramètres et des tableaux :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:55 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:69 msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" msgstr "``weights_to_parameters`` --> ``ndarrays_to_parameters``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:57 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 msgid "" "Strategy initialization: if the strategy relies on the default values for" " ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " @@ -8217,23 +8469,23 @@ msgstr "" "stratégie) doivent maintenant initialiser manuellement FedAvg avec " "``fraction_fit`` et ``fraction_evaluate`` fixés à ``0.1``." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" msgstr "Renommer les paramètres de stratégie intégrés (par exemple, ``FedAvg``) :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:79 msgid "``fraction_eval`` --> ``fraction_evaluate``" msgstr "``fraction_eval`` --> ``fraction_evaluate``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 msgid "``min_eval_clients`` --> ``min_evaluate_clients``" msgstr "``min_eval_clients`` --> ``min_evaluate_clients``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:81 msgid "``eval_fn`` --> ``evaluate_fn``" msgstr "``eval_fn`` --> ``evaluate_fn``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:64 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:83 msgid "" "Rename ``rnd`` to ``server_round``. This impacts multiple methods and " "functions, for example, ``configure_fit``, ``aggregate_fit``, " @@ -8243,11 +8495,11 @@ msgstr "" "méthodes et fonctions, par exemple, ``configure_fit``, ``aggregate_fit``," " ``configure_evaluate``, ``aggregate_evaluate``, et ``evaluate_fn``." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:86 msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" msgstr "Ajoute ``server_round`` et ``config`` à ``evaluate_fn`` :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:88 msgid "" "Flower 0.19: ``def evaluate(parameters: NDArrays) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" "Flower 0.19 : ``def evaluate(parameters : NDArrays) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:90 msgid "" "Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " "config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " @@ -8265,11 +8517,11 @@ msgstr "" "config : Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " "Scalar]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:94 msgid "Custom strategies" msgstr "Stratégies personnalisées" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:73 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:96 msgid "" "The type of parameter ``failures`` has changed from " "``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " @@ -8282,7 +8534,7 @@ msgstr "" "``aggregate_fit``) et ``List[Union[Tuple[ClientProxy, EvaluateRes], " "BaseException]]`` (dans ``aggregate_evaluate``)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:74 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:100 msgid "" "The ``Strategy`` method ``evaluate`` now receives the current round of " "federated learning/evaluation as the first parameter:" msgstr "" "La méthode ``evaluate`` de ``Strategy`` reçoit maintenant le cycle actuel " "d'apprentissage/évaluation fédéré comme premier paramètre :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:76 +#: 
../../source/how-to-upgrade-to-flower-1.0.rst:103 msgid "" "Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" @@ -8298,7 +8550,7 @@ msgstr "" "Flower 0.19 : ``def evaluate(self, parameters : Parameters) -> " "Optional[Tuple[float, Dict[str, Scalar]]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:105 msgid "" "Flower 1.0: ``def evaluate(self, server_round: int, parameters: " "Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" @@ -8306,11 +8558,11 @@ msgstr "" "Flower 1.0 : ``def evaluate(self, server_round : int, parameters : " "Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:109 msgid "Optional improvements" msgstr "Améliorations facultatives" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:82 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:111 msgid "" "Along with the necessary changes above, there are a number of potential " "improvements that just became possible:" @@ -8319,7 +8571,7 @@ msgstr "" "certain nombre d'améliorations potentielles qui viennent d'être rendues " "possibles :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:84 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:114 msgid "" "Remove \"placeholder\" methods from subclasses of ``Client`` or " "``NumPyClient``. If you, for example, use server-side evaluation, then " @@ -8331,7 +8583,7 @@ msgstr "" "serveur, alors les implémentations \"placeholder\" de ``evaluate`` ne " "sont plus nécessaires." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:85 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:117 msgid "" "Configure the round timeout via ``start_simulation``: " "``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " @@ -8341,12 +8593,12 @@ msgstr "" "``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " "round_timeout=600.0), ...)``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 -#: ../../source/how-to-upgrade-to-flower-next.rst:317 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:121 +#: ../../source/how-to-upgrade-to-flower-next.rst:348 msgid "Further help" msgstr "Aide supplémentaire" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:91 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:123 msgid "" "Most official `Flower code examples " "`_ are already updated" @@ -8375,7 +8627,7 @@ msgid "" "1.8." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:9 +#: ../../source/how-to-upgrade-to-flower-next.rst:11 msgid "" "This guide shows how to reuse pre-``1.8`` Flower code with minimum code " "changes by using the *compatibility layer* in Flower Next. In another " @@ -8383,11 +8635,11 @@ msgid "" "Next APIs." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:13 +#: ../../source/how-to-upgrade-to-flower-next.rst:15 msgid "Let's dive in!" 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:48 +#: ../../source/how-to-upgrade-to-flower-next.rst:68 #, fuzzy msgid "" "Here's how to update an existing installation of Flower to Flower Next " @@ -8396,26 +8648,26 @@ msgstr "" "Voici comment mettre à jour une installation existante vers Flower 1.0 en" " utilisant soit pip soit Poetry :" -#: ../../source/how-to-upgrade-to-flower-next.rst:54 +#: ../../source/how-to-upgrade-to-flower-next.rst:74 msgid "or if you need Flower Next with simulation:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:61 +#: ../../source/how-to-upgrade-to-flower-next.rst:80 msgid "" "Ensure you set the following version constraint in your " "``requirements.txt``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:71 +#: ../../source/how-to-upgrade-to-flower-next.rst:90 msgid "or ``pyproject.toml``:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:82 +#: ../../source/how-to-upgrade-to-flower-next.rst:101 #, fuzzy msgid "Using Poetry" msgstr "Utiliser la poésie (recommandé)" -#: ../../source/how-to-upgrade-to-flower-next.rst:84 +#: ../../source/how-to-upgrade-to-flower-next.rst:103 #, fuzzy msgid "" "Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " @@ -8426,14 +8678,14 @@ msgstr "" "puis réinstallez (n'oubliez pas de supprimer ``poetry.lock`` via ``rm " "poetry.lock`` avant d'exécuter ``poetry install``)." -#: ../../source/how-to-upgrade-to-flower-next.rst:86 +#: ../../source/how-to-upgrade-to-flower-next.rst:106 #, fuzzy msgid "" "Ensure you set the following version constraint in your " "``pyproject.toml``:" msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." -#: ../../source/how-to-upgrade-to-flower-next.rst:102 +#: ../../source/how-to-upgrade-to-flower-next.rst:122 msgid "" "In Flower Next, the *infrastructure* and *application layers* have been " "decoupled. 
Instead of starting a client in code via ``start_client()``, " @@ -8446,33 +8698,33 @@ msgid "" "way:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:109 +#: ../../source/how-to-upgrade-to-flower-next.rst:131 #, fuzzy msgid "|clientapp_link|_" msgstr "client" -#: ../../source/how-to-upgrade-to-flower-next.rst:110 +#: ../../source/how-to-upgrade-to-flower-next.rst:133 msgid "" "Wrap your existing client with |clientapp_link|_ instead of launching it " "via |startclient_link|_. Here's an example:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:132 +#: ../../source/how-to-upgrade-to-flower-next.rst:156 #, fuzzy msgid "|serverapp_link|_" msgstr "serveur" -#: ../../source/how-to-upgrade-to-flower-next.rst:133 +#: ../../source/how-to-upgrade-to-flower-next.rst:158 msgid "" "Wrap your existing strategy with |serverapp_link|_ instead of starting " "the server via |startserver_link|_. Here's an example:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:154 +#: ../../source/how-to-upgrade-to-flower-next.rst:179 msgid "Deployment" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:155 +#: ../../source/how-to-upgrade-to-flower-next.rst:181 msgid "" "Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " "in sequence, |flowernext_clientapp_link|_ (2x) and " @@ -8480,13 +8732,13 @@ msgid "" " `server.py` as Python scripts." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:158 +#: ../../source/how-to-upgrade-to-flower-next.rst:184 msgid "" "Here's an example to start the server without HTTPS (only for " "prototyping):" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:174 +#: ../../source/how-to-upgrade-to-flower-next.rst:200 msgid "" "Here's another example to start with HTTPS. Use the ``--ssl-ca-" "certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " @@ -8494,19 +8746,19 @@ msgid "" "private key)." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:201 +#: ../../source/how-to-upgrade-to-flower-next.rst:228 #, fuzzy msgid "Simulation in CLI" msgstr "Simulation de moniteur" -#: ../../source/how-to-upgrade-to-flower-next.rst:202 +#: ../../source/how-to-upgrade-to-flower-next.rst:230 msgid "" "Wrap your existing client and strategy with |clientapp_link|_ and " "|serverapp_link|_, respectively. There is no need to use |startsim_link|_" " anymore. Here's an example:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:232 +#: ../../source/how-to-upgrade-to-flower-next.rst:263 msgid "" "Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " "``client_app`` object in the code instead of executing the Python script." @@ -8514,24 +8766,24 @@ msgid "" "objects are in a ``sim.py`` module):" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:249 +#: ../../source/how-to-upgrade-to-flower-next.rst:280 msgid "" "Set default resources for each |clientapp_link|_ using the ``--backend-" "config`` command line argument instead of setting the " "``client_resources`` argument in |startsim_link|_. Here's an example:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:275 +#: ../../source/how-to-upgrade-to-flower-next.rst:304 msgid "Simulation in a Notebook" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:276 +#: ../../source/how-to-upgrade-to-flower-next.rst:306 msgid "" "Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " "an example:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:319 +#: ../../source/how-to-upgrade-to-flower-next.rst:350 #, fuzzy msgid "" "Some official `Flower code examples `_ " @@ -8549,19 +8801,19 @@ msgstr "" "Flower `_ et utilise le canal " "``#questions``." 
-#: ../../source/how-to-upgrade-to-flower-next.rst:325 +#: ../../source/how-to-upgrade-to-flower-next.rst:357 #, fuzzy msgid "Important" msgstr "Changements importants :" -#: ../../source/how-to-upgrade-to-flower-next.rst:328 +#: ../../source/how-to-upgrade-to-flower-next.rst:359 msgid "" "As we continuously enhance Flower Next at a rapid pace, we'll be " "periodically updating this guide. Please feel free to share any feedback " "with us!" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:334 +#: ../../source/how-to-upgrade-to-flower-next.rst:365 msgid "Happy migrating! 🚀" msgstr "" @@ -8575,7 +8827,7 @@ msgid "" " interfaces may change in future versions.**" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:6 +#: ../../source/how-to-use-built-in-mods.rst:7 msgid "" "In this tutorial, we will learn how to utilize built-in mods to augment " "the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " @@ -8583,105 +8835,105 @@ msgid "" "the ``ClientApp``." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:9 +#: ../../source/how-to-use-built-in-mods.rst:12 msgid "What are Mods?" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:11 +#: ../../source/how-to-use-built-in-mods.rst:14 msgid "" "A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " "or inspect the incoming ``Message`` and the resulting outgoing " "``Message``. 
The signature for a ``Mod`` is as follows:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:18 +#: ../../source/how-to-use-built-in-mods.rst:23 msgid "A typical mod function might look something like this:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:31 +#: ../../source/how-to-use-built-in-mods.rst:36 msgid "Using Mods" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:33 +#: ../../source/how-to-use-built-in-mods.rst:38 msgid "To use mods in your ``ClientApp``, you can follow these steps:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:36 +#: ../../source/how-to-use-built-in-mods.rst:41 msgid "1. Import the required mods" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:38 +#: ../../source/how-to-use-built-in-mods.rst:43 msgid "First, import the built-in mod you intend to use:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:46 +#: ../../source/how-to-use-built-in-mods.rst:51 msgid "2. Define your client function" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:48 +#: ../../source/how-to-use-built-in-mods.rst:53 msgid "" "Define your client function (``client_fn``) that will be wrapped by the " "mod(s):" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:57 +#: ../../source/how-to-use-built-in-mods.rst:62 msgid "3. Create the ``ClientApp`` with mods" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:59 +#: ../../source/how-to-use-built-in-mods.rst:64 msgid "" "Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " "argument. 
The order in which you provide the mods matters:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:72 +#: ../../source/how-to-use-built-in-mods.rst:78 #, fuzzy msgid "Order of execution" msgstr "Dépréciations" -#: ../../source/how-to-use-built-in-mods.rst:74 +#: ../../source/how-to-use-built-in-mods.rst:80 msgid "" "When the ``ClientApp`` runs, the mods are executed in the order they are " "provided in the list:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:76 +#: ../../source/how-to-use-built-in-mods.rst:83 msgid "``example_mod_1`` (outermost mod)" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:77 +#: ../../source/how-to-use-built-in-mods.rst:84 msgid "``example_mod_2`` (next mod)" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:78 +#: ../../source/how-to-use-built-in-mods.rst:85 msgid "" "Message handler (core function that handles the incoming ``Message`` and " "returns the outgoing ``Message``)" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:79 +#: ../../source/how-to-use-built-in-mods.rst:87 msgid "``example_mod_2`` (on the way back)" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:80 +#: ../../source/how-to-use-built-in-mods.rst:88 msgid "``example_mod_1`` (outermost mod on the way back)" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:82 +#: ../../source/how-to-use-built-in-mods.rst:90 msgid "" "Each mod has a chance to inspect and modify the incoming ``Message`` " "before passing it to the next mod, and likewise with the outgoing " "``Message`` before returning it up the stack." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:87 +#: ../../source/how-to-use-built-in-mods.rst:97 msgid "" "By following this guide, you have learned how to effectively use mods to " "enhance your ``ClientApp``'s functionality. Remember that the order of " "mods is crucial and affects how the input and output are processed." 
msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:89 +#: ../../source/how-to-use-built-in-mods.rst:101 msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" msgstr "" @@ -8690,14 +8942,14 @@ msgstr "" msgid "Use Differential Privacy" msgstr "Confidentialité différentielle" -#: ../../source/how-to-use-differential-privacy.rst:3 +#: ../../source/how-to-use-differential-privacy.rst:4 msgid "" "This guide explains how you can utilize differential privacy in the " "Flower framework. If you are not yet familiar with differential privacy, " "you can refer to :doc:`explanation-differential-privacy`." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:7 +#: ../../source/how-to-use-differential-privacy.rst:10 msgid "" "Differential Privacy in Flower is in a preview phase. If you plan to use " "these features in a production environment with sensitive data, feel free" @@ -8705,7 +8957,7 @@ msgid "" "to best use these features." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:12 +#: ../../source/how-to-use-differential-privacy.rst:17 msgid "" "This approach consists of two separate phases: clipping of the updates " "and adding noise to the aggregated model. For the clipping phase, Flower " @@ -8713,7 +8965,7 @@ msgid "" "the server side or the client side." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:15 +#: ../../source/how-to-use-differential-privacy.rst:21 msgid "" "**Server-side Clipping**: This approach has the advantage of the server " "enforcing uniform clipping across all clients' updates and reducing the " @@ -8722,7 +8974,7 @@ msgid "" "the need to perform the clipping operation for all clients." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:16 +#: ../../source/how-to-use-differential-privacy.rst:26 msgid "" "**Client-side Clipping**: This approach has the advantage of reducing the" " computational overhead on the server. 
However, it also has the " @@ -8730,19 +8982,19 @@ msgid "" "control over the clipping process." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:21 +#: ../../source/how-to-use-differential-privacy.rst:31 #, fuzzy msgid "Server-side Clipping" msgstr "Logique côté serveur" -#: ../../source/how-to-use-differential-privacy.rst:22 +#: ../../source/how-to-use-differential-privacy.rst:33 msgid "" -"For central DP with server-side clipping, there are two :code:`Strategy` " -"classes that act as wrappers around the actual :code:`Strategy` instance " -"(for example, :code:`FedAvg`). The two wrapper classes are " -":code:`DifferentialPrivacyServerSideFixedClipping` and " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " -"adaptive clipping." +"For central DP with server-side clipping, there are two ``Strategy`` " +"classes that act as wrappers around the actual ``Strategy`` instance (for" +" example, ``FedAvg``). The two wrapper classes are " +"``DifferentialPrivacyServerSideFixedClipping`` and " +"``DifferentialPrivacyServerSideAdaptiveClipping`` for fixed and adaptive " +"clipping." msgstr "" #: ../../source/how-to-use-differential-privacy.rst:-1 @@ -8750,30 +9002,29 @@ msgstr "" msgid "server side clipping" msgstr "Logique côté serveur" -#: ../../source/how-to-use-differential-privacy.rst:31 +#: ../../source/how-to-use-differential-privacy.rst:43 msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use server-" -"side fixed clipping using the " -":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " -"same approach can be used with " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " +"The code sample below enables the ``FedAvg`` strategy to use server-side " +"fixed clipping using the ``DifferentialPrivacyServerSideFixedClipping`` " +"wrapper class. 
The same approach can be used with " +"``DifferentialPrivacyServerSideAdaptiveClipping`` by adjusting the " "corresponding input parameters." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:52 +#: ../../source/how-to-use-differential-privacy.rst:64 #, fuzzy msgid "Client-side Clipping" msgstr "Logique côté client" -#: ../../source/how-to-use-differential-privacy.rst:53 +#: ../../source/how-to-use-differential-privacy.rst:66 msgid "" "For central DP with client-side clipping, the server sends the clipping " "value to selected clients on each round. Clients can use existing Flower " -":code:`Mods` to perform the clipping. Two mods are available for fixed " -"and adaptive client-side clipping: :code:`fixedclipping_mod` and " -":code:`adaptiveclipping_mod` with corresponding server-side wrappers " -":code:`DifferentialPrivacyClientSideFixedClipping` and " -":code:`DifferentialPrivacyClientSideAdaptiveClipping`." +"``Mods`` to perform the clipping. Two mods are available for fixed and " +"adaptive client-side clipping: ``fixedclipping_mod`` and " +"``adaptiveclipping_mod`` with corresponding server-side wrappers " +"``DifferentialPrivacyClientSideFixedClipping`` and " +"``DifferentialPrivacyClientSideAdaptiveClipping``." 
msgstr "" #: ../../source/how-to-use-differential-privacy.rst:-1 @@ -8781,22 +9032,22 @@ msgstr "" msgid "client side clipping" msgstr "Logique côté client" -#: ../../source/how-to-use-differential-privacy.rst:63 +#: ../../source/how-to-use-differential-privacy.rst:78 msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use " -"differential privacy with client-side fixed clipping using both the " -":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " -"the client, :code:`fixedclipping_mod`:" +"The code sample below enables the ``FedAvg`` strategy to use differential" +" privacy with client-side fixed clipping using both the " +"``DifferentialPrivacyClientSideFixedClipping`` wrapper class and, on the " +"client, ``fixedclipping_mod``:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:80 +#: ../../source/how-to-use-differential-privacy.rst:97 msgid "" -"In addition to the server-side strategy wrapper, the :code:`ClientApp` " -"needs to configure the matching :code:`fixedclipping_mod` to perform the " -"client-side clipping:" +"In addition to the server-side strategy wrapper, the ``ClientApp`` needs " +"to configure the matching ``fixedclipping_mod`` to perform the client-" +"side clipping:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:97 +#: ../../source/how-to-use-differential-privacy.rst:115 msgid "" "To utilize local differential privacy (DP) and add noise to the client " "model parameters before transmitting them to the server in Flower, you " @@ -8808,11 +9059,11 @@ msgstr "" msgid "local DP mod" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:104 -msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +#: ../../source/how-to-use-differential-privacy.rst:125 +msgid "Below is a code example that shows how to use ``LocalDpMod``:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:122 +#: ../../source/how-to-use-differential-privacy.rst:140 msgid 
"" "Please note that the order of mods, especially those that modify " "parameters, is important when using multiple modifiers. Typically, " @@ -8820,11 +9071,11 @@ msgid "" "parameters." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:125 +#: ../../source/how-to-use-differential-privacy.rst:145 msgid "Local Training using Privacy Engines" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:126 +#: ../../source/how-to-use-differential-privacy.rst:147 msgid "" "For ensuring data instance-level privacy during local model training on " "the client side, consider leveraging privacy engines such as Opacus and " @@ -8841,16 +9092,17 @@ msgid "Use strategies" msgstr "Stratégies personnalisées" #: ../../source/how-to-use-strategies.rst:4 +#, fuzzy msgid "" "Flower allows full customization of the learning process through the " -":code:`Strategy` abstraction. A number of built-in strategies are " -"provided in the core framework." +"``Strategy`` abstraction. A number of built-in strategies are provided in" +" the core framework." msgstr "" "Flower permet une personnalisation complète du processus d'apprentissage " "grâce à l'abstraction :code:`Stratégie`. Un certain nombre de stratégies " "intégrées sont fournies dans le cadre principal." 
-#: ../../source/how-to-use-strategies.rst:6 +#: ../../source/how-to-use-strategies.rst:7 msgid "" "There are three ways to customize the way Flower orchestrates the " "learning process on the server side:" @@ -8858,25 +9110,26 @@ msgstr "" "Il y a trois façons de personnaliser la manière dont Flower orchestre le " "processus d'apprentissage du côté du serveur :" -#: ../../source/how-to-use-strategies.rst:8 -msgid "Use an existing strategy, for example, :code:`FedAvg`" +#: ../../source/how-to-use-strategies.rst:10 +#, fuzzy +msgid "Use an existing strategy, for example, ``FedAvg``" msgstr "Utilise une stratégie existante, par exemple :code:`FedAvg`" -#: ../../source/how-to-use-strategies.rst:9 -#: ../../source/how-to-use-strategies.rst:40 +#: ../../source/how-to-use-strategies.rst:11 +#: ../../source/how-to-use-strategies.rst:43 msgid "Customize an existing strategy with callback functions" msgstr "Personnalise une stratégie existante avec des fonctions de rappel" -#: ../../source/how-to-use-strategies.rst:10 -#: ../../source/how-to-use-strategies.rst:87 +#: ../../source/how-to-use-strategies.rst:12 +#: ../../source/how-to-use-strategies.rst:99 msgid "Implement a novel strategy" msgstr "Mets en place une nouvelle stratégie" -#: ../../source/how-to-use-strategies.rst:14 +#: ../../source/how-to-use-strategies.rst:15 msgid "Use an existing strategy" msgstr "Utilise une stratégie existante" -#: ../../source/how-to-use-strategies.rst:16 +#: ../../source/how-to-use-strategies.rst:17 msgid "" "Flower comes with a number of popular federated learning strategies " "built-in. A built-in strategy can be instantiated as follows:" @@ -8884,18 +9137,19 @@ msgstr "" "Flower intègre un certain nombre de stratégies d'apprentissage fédéré " "populaires. 
Une stratégie intégrée peut être instanciée comme suit :" -#: ../../source/how-to-use-strategies.rst:25 +#: ../../source/how-to-use-strategies.rst:27 +#, fuzzy msgid "" "This creates a strategy with all parameters left at their default values " -"and passes it to the :code:`start_server` function. It is usually " -"recommended to adjust a few parameters during instantiation:" +"and passes it to the ``start_server`` function. It is usually recommended" +" to adjust a few parameters during instantiation:" msgstr "" "Cela crée une stratégie dont tous les paramètres sont laissés à leur " "valeur par défaut et la transmet à la fonction :code:`start_server`. Il " "est généralement recommandé d'ajuster quelques paramètres lors de " "l'instanciation :" -#: ../../source/how-to-use-strategies.rst:42 +#: ../../source/how-to-use-strategies.rst:45 msgid "" "Existing strategies provide several ways to customize their behaviour. " "Callback functions allow strategies to call user-provided code during " @@ -8905,19 +9159,19 @@ msgstr "" "comportement. Les fonctions de rappel permettent aux stratégies d'appeler" " le code fourni par l'utilisateur pendant l'exécution." -#: ../../source/how-to-use-strategies.rst:45 +#: ../../source/how-to-use-strategies.rst:49 msgid "Configuring client fit and client evaluate" msgstr "Configurer l'adaptation et l'évaluation du client" -#: ../../source/how-to-use-strategies.rst:47 +#: ../../source/how-to-use-strategies.rst:51 +#, fuzzy msgid "" "The server can pass new configuration values to the client each round by " -"providing a function to :code:`on_fit_config_fn`. The provided function " -"will be called by the strategy and must return a dictionary of " -"configuration key values pairs that will be sent to the client. It must " -"return a dictionary of arbitrary configuration values :code:`client.fit`" -" and :code:`client.evaluate` functions during each round of federated " -"learning." +"providing a function to ``on_fit_config_fn``. 
The provided function will " +"be called by the strategy and must return a dictionary of configuration " +"key values pairs that will be sent to the client. It must return a " +"dictionary of arbitrary configuration values ``client.fit`` and " +"``client.evaluate`` functions during each round of federated learning." msgstr "" "Le serveur peut transmettre de nouvelles valeurs de configuration au " "client à chaque tour en fournissant une fonction à " @@ -8928,14 +9182,14 @@ msgstr "" "et :code:`client.evaluate` au cours de chaque tour d'apprentissage " "fédéré." -#: ../../source/how-to-use-strategies.rst:75 +#: ../../source/how-to-use-strategies.rst:84 #, fuzzy msgid "" -"The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " +"The ``on_fit_config_fn`` can be used to pass arbitrary configuration " "values from server to client, and potentially change these values each " "round, for example, to adjust the learning rate. The client will receive " -"the dictionary returned by the :code:`on_fit_config_fn` in its own " -":code:`client.fit()` function." +"the dictionary returned by the ``on_fit_config_fn`` in its own " +"``client.fit()`` function." msgstr "" "Le :code:`on_fit_config_fn` peut être utilisé pour passer des valeurs de " "configuration arbitraires du serveur au client, et changer poétiquement " @@ -8943,29 +9197,30 @@ msgstr "" "d'apprentissage. Le client recevra le dictionnaire renvoyé par le " ":code:`on_fit_config_fn` dans sa propre fonction :code:`client.fit()`." 
-#: ../../source/how-to-use-strategies.rst:78 +#: ../../source/how-to-use-strategies.rst:89 +#, fuzzy msgid "" -"Similar to :code:`on_fit_config_fn`, there is also " -":code:`on_evaluate_config_fn` to customize the configuration sent to " -":code:`client.evaluate()`" +"Similar to ``on_fit_config_fn``, there is also ``on_evaluate_config_fn`` " +"to customize the configuration sent to ``client.evaluate()``" msgstr "" "Comme pour :code:`on_fit_config_fn`, il existe aussi " ":code:`on_evaluate_config_fn` pour personnaliser la configuration envoyée" " à :code:`client.evaluate()`" -#: ../../source/how-to-use-strategies.rst:81 +#: ../../source/how-to-use-strategies.rst:93 msgid "Configuring server-side evaluation" msgstr "Configuration de l'évaluation côté serveur" -#: ../../source/how-to-use-strategies.rst:83 +#: ../../source/how-to-use-strategies.rst:95 +#, fuzzy msgid "" "Server-side evaluation can be enabled by passing an evaluation function " -"to :code:`evaluate_fn`." +"to ``evaluate_fn``." msgstr "" "L'évaluation côté serveur peut être activée en passant une fonction " "d'évaluation à :code:`evaluate_fn`." 
-#: ../../source/how-to-use-strategies.rst:89 +#: ../../source/how-to-use-strategies.rst:101 #, fuzzy msgid "" "Writing a fully custom strategy is a bit more involved, but it provides " @@ -8985,15 +9240,15 @@ msgstr "Tutoriel" msgid "Quickstart tutorials" msgstr "Quickstart tutorials" -#: ../../source/index.rst:75 ../../source/index.rst:79 +#: ../../source/index.rst:81 ../../source/index.rst:85 msgid "How-to guides" msgstr "Guides" -#: ../../source/index.rst:100 +#: ../../source/index.rst:106 msgid "Legacy example guides" msgstr "" -#: ../../source/index.rst:108 ../../source/index.rst:112 +#: ../../source/index.rst:114 ../../source/index.rst:119 msgid "Explanations" msgstr "Explications" @@ -9001,26 +9256,26 @@ msgstr "Explications" msgid "API reference" msgstr "Référence pour l'API" -#: ../../source/index.rst:138 +#: ../../source/index.rst:145 msgid "Reference docs" msgstr "Référence pour la documentation" -#: ../../source/index.rst:154 +#: ../../source/index.rst:160 #, fuzzy msgid "Contributor tutorials" msgstr "Configuration du contributeur" -#: ../../source/index.rst:161 +#: ../../source/index.rst:167 #, fuzzy msgid "Contributor how-to guides" msgstr "Guide pour les contributeurs" -#: ../../source/index.rst:173 +#: ../../source/index.rst:179 #, fuzzy msgid "Contributor explanations" msgstr "Explications" -#: ../../source/index.rst:179 +#: ../../source/index.rst:185 #, fuzzy msgid "Contributor references" msgstr "Configuration du contributeur" @@ -9058,7 +9313,7 @@ msgstr "" " chercheurs, ingénieurs, étudiants, professionnels, académiques, et " "autres hobbyistes." 
-#: ../../source/index.rst:15 +#: ../../source/index.rst:16 msgid "Join us on Slack" msgstr "Join us on Slack" @@ -9112,16 +9367,16 @@ msgstr "" "` | :ref:`Android ` | :ref:`iOS " "`" -#: ../../source/index.rst:64 +#: ../../source/index.rst:70 msgid "We also made video tutorials for PyTorch:" msgstr "" -#: ../../source/index.rst:69 +#: ../../source/index.rst:75 #, fuzzy msgid "And TensorFlow:" msgstr "Exemples de TensorFlow" -#: ../../source/index.rst:77 +#: ../../source/index.rst:83 msgid "" "Problem-oriented how-to guides show step-by-step how to achieve a " "specific goal." @@ -9129,7 +9384,7 @@ msgstr "" "Guides orientés sur la résolutions étapes par étapes de problèmes ou " "objectifs specifiques." -#: ../../source/index.rst:110 +#: ../../source/index.rst:116 msgid "" "Understanding-oriented concept guides explain and discuss key topics and " "underlying ideas behind Flower and collaborative AI." @@ -9137,29 +9392,29 @@ msgstr "" "Guides orientés sur la compréhension et l'explication des sujets et idées" " de fonds sur lesquels sont construits Flower et l'IA collaborative." -#: ../../source/index.rst:121 +#: ../../source/index.rst:128 #, fuzzy msgid "References" msgstr "Référence" -#: ../../source/index.rst:123 +#: ../../source/index.rst:130 msgid "Information-oriented API reference and other reference material." msgstr "Référence de l'API orientée sur l'information pure." -#: ../../source/index.rst:132::1 +#: ../../source/index.rst:139::1 msgid ":py:obj:`flwr `\\" msgstr "" -#: ../../source/index.rst:132::1 flwr:1 of +#: ../../source/index.rst:139::1 flwr:1 of msgid "Flower main package." msgstr "" -#: ../../source/index.rst:149 +#: ../../source/index.rst:155 #, fuzzy msgid "Contributor docs" msgstr "Configuration du contributeur" -#: ../../source/index.rst:151 +#: ../../source/index.rst:157 #, fuzzy msgid "" "The Flower community welcomes contributions. 
The following docs are " @@ -9271,6 +9526,10 @@ msgstr "Amélioration de la documentation" msgid "Optional argument" msgstr "Améliorations facultatives" +#: ../../flwr install:1 +msgid "The source FAB file to install." +msgstr "" + #: ../../flwr log:1 msgid "Get logs from a Flower project run." msgstr "" @@ -9279,7 +9538,7 @@ msgstr "" msgid "Flag to stream or print logs from the Flower run" msgstr "" -#: ../../flwr log +#: ../../flwr log run #, fuzzy msgid "default" msgstr "Flux de travail" @@ -9293,6 +9552,19 @@ msgstr "" msgid "Required argument" msgstr "Amélioration de la documentation" +#: ../../flwr log:1 +#, fuzzy +msgid "The Flower run ID to query" +msgstr "Rejoignez la communauté de Flower" + +#: ../../flwr log:1 +msgid "Path of the Flower project to run" +msgstr "" + +#: ../../flwr log:1 +msgid "Name of the federation to run the app on" +msgstr "" + #: ../../flwr new:1 #, fuzzy msgid "Create new Flower App." @@ -9317,6 +9589,11 @@ msgstr "" msgid "The Flower username of the author" msgstr "" +#: ../../flwr new:1 +#, fuzzy +msgid "The name of the Flower App" +msgstr "Chargement des données" + #: ../../flwr run:1 #, fuzzy msgid "Run Flower App." @@ -9338,6 +9615,26 @@ msgid "" " the `pyproject.toml` in order to be properly overriden." msgstr "" +#: ../../flwr run:1 +msgid "" +"Use `--stream` with `flwr run` to display logs; logs are not streamed by " +"default." +msgstr "" + +#: ../../flwr run:1 +#, fuzzy +msgid "``False``" +msgstr ":code:`évaluer`" + +#: ../../flwr run:1 +#, fuzzy +msgid "Path of the Flower App to run." +msgstr "Chargement des données" + +#: ../../flwr run:1 +msgid "Name of the federation to run the app on." 
+msgstr "" + #: ../../source/ref-api-cli.rst:16 #, fuzzy msgid "flower-simulation" @@ -9357,17 +9654,16 @@ msgstr "Serveur de Flower" msgid "flower-server-app" msgstr "flower-driver-api" -#: ../../source/ref-api-cli.rst:49 +#: ../../source/ref-api-cli.rst:50 msgid "" -"Note that since version :code:`1.11.0`, :code:`flower-server-app` no " -"longer supports passing a reference to a `ServerApp` attribute. Instead, " -"you need to pass the path to Flower app via the argument :code:`--app`. " -"This is the path to a directory containing a `pyproject.toml`. You can " -"create a valid Flower app by executing :code:`flwr new` and following the" -" prompt." +"Note that since version ``1.11.0``, ``flower-server-app`` no longer " +"supports passing a reference to a `ServerApp` attribute. Instead, you " +"need to pass the path to Flower app via the argument ``--app``. This is " +"the path to a directory containing a `pyproject.toml`. You can create a " +"valid Flower app by executing ``flwr new`` and following the prompt." msgstr "" -#: ../../source/ref-api-cli.rst:62 +#: ../../source/ref-api-cli.rst:64 #, fuzzy msgid "flower-superexec" msgstr "flower-superlink" @@ -23657,13 +23953,16 @@ msgstr "" "tels que `PyTorch `_ ou `TensorFlow " "`_." -#: ../../source/ref-example-projects.rst:10 +#: ../../source/ref-example-projects.rst:9 #, fuzzy -msgid "" -"The following examples are available as standalone projects. Quickstart " -"TensorFlow/Keras ---------------------------" +msgid "The following examples are available as standalone projects." msgstr "Les exemples suivants sont disponibles sous forme de projets autonomes." 
+#: ../../source/ref-example-projects.rst:12 +#, fuzzy +msgid "Quickstart TensorFlow/Keras" +msgstr "Démarrage rapide de TensorFlow" + #: ../../source/ref-example-projects.rst:14 msgid "" "The TensorFlow/Keras quickstart example shows CIFAR-10 image " @@ -23683,14 +23982,14 @@ msgstr "" "`_" -#: ../../source/ref-example-projects.rst:18 +#: ../../source/ref-example-projects.rst:19 #, fuzzy msgid ":doc:`Quickstart TensorFlow (Tutorial) `" msgstr "" "`Quickstart TensorFlow (Tutorial) `_" -#: ../../source/ref-example-projects.rst:19 +#: ../../source/ref-example-projects.rst:20 msgid "" "`Quickstart TensorFlow (Blog Post) `_" @@ -23698,12 +23997,12 @@ msgstr "" "`Quickstart TensorFlow (Blog Post) `_" -#: ../../source/ref-example-projects.rst:23 -#: ../../source/tutorial-quickstart-pytorch.rst:5 +#: ../../source/ref-example-projects.rst:24 +#: ../../source/tutorial-quickstart-pytorch.rst:4 msgid "Quickstart PyTorch" msgstr "Démarrage rapide de PyTorch" -#: ../../source/ref-example-projects.rst:25 +#: ../../source/ref-example-projects.rst:26 msgid "" "The PyTorch quickstart example shows CIFAR-10 image classification with a" " simple Convolutional Neural Network:" @@ -23711,7 +24010,7 @@ msgstr "" "L'exemple de démarrage rapide PyTorch montre la classification d'images " "CIFAR-10 avec un simple réseau neuronal convolutif :" -#: ../../source/ref-example-projects.rst:28 +#: ../../source/ref-example-projects.rst:29 #, fuzzy msgid "" "`Quickstart PyTorch (Code) " @@ -23720,18 +24019,18 @@ msgstr "" "`Quickstart PyTorch (Code) " "`_" -#: ../../source/ref-example-projects.rst:29 +#: ../../source/ref-example-projects.rst:31 #, fuzzy msgid ":doc:`Quickstart PyTorch (Tutorial) `" msgstr "" "`Quickstart PyTorch (Tutorial) `_" -#: ../../source/ref-example-projects.rst:33 +#: ../../source/ref-example-projects.rst:34 msgid "PyTorch: From Centralized To Federated" msgstr "PyTorch : De la centralisation à la fédération" -#: ../../source/ref-example-projects.rst:35 +#: 
../../source/ref-example-projects.rst:36 msgid "" "This example shows how a regular PyTorch project can be federated using " "Flower:" @@ -23739,7 +24038,7 @@ msgstr "" "Cet exemple montre comment un projet PyTorch ordinaire peut être fédéré à" " l'aide de Flower :" -#: ../../source/ref-example-projects.rst:37 +#: ../../source/ref-example-projects.rst:38 #, fuzzy msgid "" "`PyTorch: From Centralized To Federated (Code) " @@ -23750,7 +24049,7 @@ msgstr "" "`_" -#: ../../source/ref-example-projects.rst:38 +#: ../../source/ref-example-projects.rst:40 #, fuzzy msgid "" ":doc:`PyTorch: From Centralized To Federated (Tutorial) `_" -#: ../../source/ref-example-projects.rst:42 +#: ../../source/ref-example-projects.rst:44 msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" msgstr "Apprentissage fédéré sur Raspberry Pi et Nvidia Jetson" -#: ../../source/ref-example-projects.rst:44 +#: ../../source/ref-example-projects.rst:46 msgid "" "This example shows how Flower can be used to build a federated learning " "system that run across Raspberry Pi and Nvidia Jetson:" @@ -23773,7 +24072,7 @@ msgstr "" "système d'apprentissage fédéré qui fonctionne sur Raspberry Pi et Nvidia " "Jetson :" -#: ../../source/ref-example-projects.rst:46 +#: ../../source/ref-example-projects.rst:49 #, fuzzy msgid "" "`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " @@ -23782,7 +24081,7 @@ msgstr "" "`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Code) " "`_" -#: ../../source/ref-example-projects.rst:47 +#: ../../source/ref-example-projects.rst:51 msgid "" "`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " "`_" @@ -23805,7 +24104,7 @@ msgstr "" ":fa:`eye,mr-1` Flower peut-il fonctionner sur les ordinateurs portables " "Juptyter / Google Colab ?" -#: ../../source/ref-faq.rst:8 +#: ../../source/ref-faq.rst:9 msgid "" "Yes, it can! Flower even comes with a few under-the-hood optimizations to" " make it work even better on Colab. 
Here's a quickstart example:" @@ -23814,7 +24113,7 @@ msgstr "" "pour qu'il fonctionne encore mieux sur Colab. Voici un exemple de " "démarrage rapide :" -#: ../../source/ref-faq.rst:10 +#: ../../source/ref-faq.rst:11 #, fuzzy msgid "" "`Flower simulation PyTorch " @@ -23824,7 +24123,7 @@ msgstr "" "`Flower Quickstart (TensorFlow/Keras) " "`_" -#: ../../source/ref-faq.rst:11 +#: ../../source/ref-faq.rst:12 #, fuzzy msgid "" "`Flower simulation TensorFlow/Keras " @@ -23840,7 +24139,7 @@ msgstr "" ":fa:`eye,mr-1` Comment puis-je faire fonctionner l'apprentissage fédéré " "sur un Raspberry Pi ?" -#: ../../source/ref-faq.rst:15 +#: ../../source/ref-faq.rst:16 #, fuzzy msgid "" "Find the `blog post about federated learning on embedded device here " @@ -23859,7 +24158,7 @@ msgstr "" ":fa:`eye,mr-1` Est-ce que Flower prend en charge l'apprentissage fédéré " "sur les appareils Android ?" -#: ../../source/ref-faq.rst:19 +#: ../../source/ref-faq.rst:20 #, fuzzy msgid "" "Yes, it does. Please take a look at our `blog post " @@ -23871,13 +24170,13 @@ msgstr "" "with-flower>`_ ou consultez l'`exemple de code Android sur GitHub " "`_." -#: ../../source/ref-faq.rst:21 +#: ../../source/ref-faq.rst:22 msgid "" "`Android Kotlin example `_" msgstr "" -#: ../../source/ref-faq.rst:22 +#: ../../source/ref-faq.rst:23 msgid "`Android Java example `_" msgstr "" @@ -23887,7 +24186,7 @@ msgstr "" ":fa:`eye,mr-1` Puis-je combiner l'apprentissage fédéré avec la blockchain" " ?" -#: ../../source/ref-faq.rst:26 +#: ../../source/ref-faq.rst:27 msgid "" "Yes, of course. A list of available examples using Flower within a " "blockchain environment is available here:" @@ -23895,20 +24194,20 @@ msgstr "" "Oui, bien sûr, une liste d'exemples disponibles utilisant Flower dans un " "environnement blockchain est disponible ici :" -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-faq.rst:30 msgid "`FLock: A Decentralised AI Training Platform `_." 
msgstr "" -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-faq.rst:30 msgid "Contribute to on-chain training the model and earn rewards." msgstr "" -#: ../../source/ref-faq.rst:30 +#: ../../source/ref-faq.rst:31 #, fuzzy msgid "Local blockchain with federated learning simulation." msgstr "Mise à l'échelle de l'apprentissage fédéré" -#: ../../source/ref-faq.rst:31 +#: ../../source/ref-faq.rst:32 msgid "" "`Flower meets Nevermined GitHub Repository `_." @@ -23916,7 +24215,7 @@ msgstr "" "`Flower meets Nevermined GitHub Repository `_." -#: ../../source/ref-faq.rst:32 +#: ../../source/ref-faq.rst:33 msgid "" "`Flower meets Nevermined YouTube video " "`_." @@ -23924,7 +24223,7 @@ msgstr "" "`Flower rencontre Nevermined vidéo YouTube " "`_." -#: ../../source/ref-faq.rst:33 +#: ../../source/ref-faq.rst:34 #, fuzzy msgid "" "`Flower meets KOSMoS `_." -#: ../../source/ref-faq.rst:34 +#: ../../source/ref-faq.rst:35 msgid "" "`Flower meets Talan blog post `_ ." -#: ../../source/ref-faq.rst:35 +#: ../../source/ref-faq.rst:36 msgid "" "`Flower meets Talan GitHub Repository " "`_ ." @@ -24244,12 +24543,12 @@ msgid "" "app using Flower." msgstr "" -#: ../../source/tutorial-quickstart-android.rst:5 +#: ../../source/tutorial-quickstart-android.rst:4 #, fuzzy msgid "Quickstart Android" msgstr "Démarrage rapide des Pandas" -#: ../../source/tutorial-quickstart-android.rst:10 +#: ../../source/tutorial-quickstart-android.rst:9 #, fuzzy msgid "" "Let's build a federated learning system using TFLite and Flower on " @@ -24258,7 +24557,7 @@ msgstr "" "Construisons un système d'apprentissage fédéré en utilisant fastai et " "Flower !" -#: ../../source/tutorial-quickstart-android.rst:12 +#: ../../source/tutorial-quickstart-android.rst:11 #, fuzzy msgid "" "Please refer to the `full code example " @@ -24269,11 +24568,11 @@ msgstr "" "`_ " "pour en savoir plus." 
-#: ../../source/tutorial-quickstart-fastai.rst:5 +#: ../../source/tutorial-quickstart-fastai.rst:4 msgid "Quickstart fastai" msgstr "Démarrage rapide fastai" -#: ../../source/tutorial-quickstart-fastai.rst:7 +#: ../../source/tutorial-quickstart-fastai.rst:6 #, fuzzy msgid "" "In this federated learning tutorial we will learn how to train a " @@ -24285,24 +24584,24 @@ msgstr "" "tout exécuter au sein d'un `virtualenv `_." -#: ../../source/tutorial-quickstart-fastai.rst:12 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +#: ../../source/tutorial-quickstart-fastai.rst:10 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:11 msgid "Then, clone the code example directly from GitHub:" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:20 +#: ../../source/tutorial-quickstart-fastai.rst:18 msgid "" "This will create a new directory called `quickstart-fastai` containing " "the following files:" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:33 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:33 +#: ../../source/tutorial-quickstart-fastai.rst:31 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:32 #, fuzzy msgid "Next, activate your environment, then run:" msgstr "et active l'environnement virtuel avec :" -#: ../../source/tutorial-quickstart-fastai.rst:43 +#: ../../source/tutorial-quickstart-fastai.rst:41 msgid "" "This example by default runs the Flower Simulation Engine, creating a " "federation of 10 nodes using `FedAvg `_." -#: ../../source/tutorial-quickstart-huggingface.rst:14 +#: ../../source/tutorial-quickstart-huggingface.rst:12 msgid "" "Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " "project. It will generate all the files needed to run, by default with " @@ -24378,10 +24677,10 @@ msgid "" "|iidpartitioner|_." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:20 -#: ../../source/tutorial-quickstart-mlx.rst:19 -#: ../../source/tutorial-quickstart-pytorch.rst:19 -#: ../../source/tutorial-quickstart-tensorflow.rst:20 +#: ../../source/tutorial-quickstart-huggingface.rst:17 +#: ../../source/tutorial-quickstart-mlx.rst:17 +#: ../../source/tutorial-quickstart-pytorch.rst:18 +#: ../../source/tutorial-quickstart-tensorflow.rst:18 #, fuzzy msgid "" "Now that we have a rough idea of what this example is about, let's get " @@ -24391,65 +24690,65 @@ msgstr "" "commençons. Nous devons d'abord installer Flower. Tu peux le faire en " "lançant :" -#: ../../source/tutorial-quickstart-huggingface.rst:28 +#: ../../source/tutorial-quickstart-huggingface.rst:25 msgid "" "Then, run the command below. You will be prompted to select one of the " "available templates (choose ``HuggingFace``), give a name to your " "project, and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:36 -#: ../../source/tutorial-quickstart-mlx.rst:35 -#: ../../source/tutorial-quickstart-pytorch.rst:35 -#: ../../source/tutorial-quickstart-tensorflow.rst:36 +#: ../../source/tutorial-quickstart-huggingface.rst:33 +#: ../../source/tutorial-quickstart-mlx.rst:32 +#: ../../source/tutorial-quickstart-pytorch.rst:34 +#: ../../source/tutorial-quickstart-tensorflow.rst:34 msgid "" "After running it you'll notice a new directory with your project name has" " been created. 
It should have the following structure:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:50 -#: ../../source/tutorial-quickstart-mlx.rst:49 -#: ../../source/tutorial-quickstart-pytorch.rst:49 -#: ../../source/tutorial-quickstart-tensorflow.rst:50 +#: ../../source/tutorial-quickstart-huggingface.rst:47 +#: ../../source/tutorial-quickstart-mlx.rst:46 +#: ../../source/tutorial-quickstart-pytorch.rst:48 +#: ../../source/tutorial-quickstart-tensorflow.rst:48 msgid "" "If you haven't yet installed the project and its dependencies, you can do" " so by:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:58 -#: ../../source/tutorial-quickstart-pytorch.rst:57 -#: ../../source/tutorial-quickstart-tensorflow.rst:58 +#: ../../source/tutorial-quickstart-huggingface.rst:54 +#: ../../source/tutorial-quickstart-pytorch.rst:55 +#: ../../source/tutorial-quickstart-tensorflow.rst:55 msgid "To run the project, do:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:106 +#: ../../source/tutorial-quickstart-huggingface.rst:102 msgid "You can also run the project with GPU as follows:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:113 +#: ../../source/tutorial-quickstart-huggingface.rst:109 msgid "" "This will use the default arguments where each ``ClientApp`` will use 2 " "CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:124 -#: ../../source/tutorial-quickstart-mlx.rst:114 -#: ../../source/tutorial-quickstart-pytorch.rst:113 +#: ../../source/tutorial-quickstart-huggingface.rst:120 +#: ../../source/tutorial-quickstart-mlx.rst:110 +#: ../../source/tutorial-quickstart-pytorch.rst:111 msgid "" "What follows is an explanation of each component in the project you just " "created: dataset partition, the model, defining the ``ClientApp`` and " "defining the ``ServerApp``." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:130 -#: ../../source/tutorial-quickstart-mlx.rst:120 -#: ../../source/tutorial-quickstart-pytorch.rst:119 -#: ../../source/tutorial-quickstart-tensorflow.rst:116 +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/tutorial-quickstart-tensorflow.rst:112 #, fuzzy msgid "The Data" msgstr "Chargement des données" -#: ../../source/tutorial-quickstart-huggingface.rst:132 +#: ../../source/tutorial-quickstart-huggingface.rst:126 msgid "" "This tutorial uses |flowerdatasets|_ to easily download and partition the" " `IMDB `_ dataset. In " @@ -24462,15 +24761,15 @@ msgid "" "their data partition." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:178 -#: ../../source/tutorial-quickstart-mlx.rst:164 -#: ../../source/tutorial-quickstart-pytorch.rst:157 -#: ../../source/tutorial-quickstart-tensorflow.rst:145 +#: ../../source/tutorial-quickstart-huggingface.rst:171 +#: ../../source/tutorial-quickstart-mlx.rst:155 +#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/tutorial-quickstart-tensorflow.rst:139 #, fuzzy msgid "The Model" msgstr "Entraîne le modèle" -#: ../../source/tutorial-quickstart-huggingface.rst:180 +#: ../../source/tutorial-quickstart-huggingface.rst:173 #, fuzzy msgid "" "We will leverage 🤗 Hugging Face to federate the training of language " @@ -24487,13 +24786,13 @@ msgstr "" "données d'évaluations IMDB. L'objectif final est de détecter si " "l'évaluation d'un film est positive ou négative." -#: ../../source/tutorial-quickstart-huggingface.rst:193 +#: ../../source/tutorial-quickstart-huggingface.rst:185 msgid "" "Note that here, ``model_name`` is a string that will be loaded from the " "``Context`` in the ClientApp and ServerApp." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:196 +#: ../../source/tutorial-quickstart-huggingface.rst:188 msgid "" "In addition to loading the pretrained model weights and architecture, we " "also include two utility functions to perform both training (i.e. " @@ -24506,15 +24805,15 @@ msgid "" "perform local training or evaluation:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:239 -#: ../../source/tutorial-quickstart-mlx.rst:210 -#: ../../source/tutorial-quickstart-pytorch.rst:234 -#: ../../source/tutorial-quickstart-tensorflow.rst:176 +#: ../../source/tutorial-quickstart-huggingface.rst:228 +#: ../../source/tutorial-quickstart-mlx.rst:199 +#: ../../source/tutorial-quickstart-pytorch.rst:224 +#: ../../source/tutorial-quickstart-tensorflow.rst:168 #, fuzzy msgid "The ClientApp" msgstr "client" -#: ../../source/tutorial-quickstart-huggingface.rst:241 +#: ../../source/tutorial-quickstart-huggingface.rst:230 msgid "" "The main changes we have to make to use 🤗 Hugging Face with Flower will " "be found in the ``get_weights()`` and ``set_weights()`` functions. Under " @@ -24527,8 +24826,8 @@ msgid "" "them to an existing PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:254 -#: ../../source/tutorial-quickstart-pytorch.rst:245 +#: ../../source/tutorial-quickstart-huggingface.rst:241 +#: ../../source/tutorial-quickstart-pytorch.rst:234 msgid "" "The specific implementation of ``get_weights()`` and ``set_weights()`` " "depends on the type of models you use. The ones shown below work for a " @@ -24536,8 +24835,8 @@ msgid "" "have more exotic model architectures." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:269 -#: ../../source/tutorial-quickstart-pytorch.rst:261 +#: ../../source/tutorial-quickstart-huggingface.rst:257 +#: ../../source/tutorial-quickstart-pytorch.rst:250 msgid "" "The rest of the functionality is directly inspired by the centralized " "case. The ``fit()`` method in the client trains the model using the local" @@ -24545,7 +24844,7 @@ msgid "" "model received on a held-out validation set that the client might have:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:296 +#: ../../source/tutorial-quickstart-huggingface.rst:283 msgid "" "Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " "defined above by means of a ``client_fn()`` callback. Note that the " @@ -24556,15 +24855,15 @@ msgid "" "additional hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:330 -#: ../../source/tutorial-quickstart-mlx.rst:376 -#: ../../source/tutorial-quickstart-pytorch.rst:321 -#: ../../source/tutorial-quickstart-tensorflow.rst:245 +#: ../../source/tutorial-quickstart-huggingface.rst:316 +#: ../../source/tutorial-quickstart-mlx.rst:361 +#: ../../source/tutorial-quickstart-pytorch.rst:307 +#: ../../source/tutorial-quickstart-tensorflow.rst:232 #, fuzzy msgid "The ServerApp" msgstr "serveur" -#: ../../source/tutorial-quickstart-huggingface.rst:332 +#: ../../source/tutorial-quickstart-huggingface.rst:318 msgid "" "To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" " identical signature to that of ``client_fn()`` but the return type is " @@ -24575,13 +24874,13 @@ msgid "" "value defined in the ``pyproject.toml``." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:371 +#: ../../source/tutorial-quickstart-huggingface.rst:356 msgid "" "Congratulations! You've successfully built and run your first federated " "learning system for an LLM." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:376 +#: ../../source/tutorial-quickstart-huggingface.rst:361 msgid "" "Check the source code of the extended version of this tutorial in " "|quickstart_hf_link|_ in the Flower GitHub repository. For a " @@ -24595,12 +24894,12 @@ msgid "" "using Flower to train a neural network on MNIST." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:5 +#: ../../source/tutorial-quickstart-ios.rst:4 #, fuzzy msgid "Quickstart iOS" msgstr "Démarrage rapide XGBoost" -#: ../../source/tutorial-quickstart-ios.rst:10 +#: ../../source/tutorial-quickstart-ios.rst:9 #, fuzzy msgid "" "In this tutorial we will learn how to train a Neural Network on MNIST " @@ -24621,7 +24920,7 @@ msgstr "" "tout exécuter au sein d'un `virtualenv `_." -#: ../../source/tutorial-quickstart-ios.rst:15 +#: ../../source/tutorial-quickstart-ios.rst:17 #, fuzzy msgid "" "Our example consists of one Python *server* and two iPhone *clients* that" @@ -24630,7 +24929,7 @@ msgstr "" "Notre exemple consiste en un *serveur* et deux *clients* ayant tous le " "même modèle." -#: ../../source/tutorial-quickstart-ios.rst:17 +#: ../../source/tutorial-quickstart-ios.rst:20 #, fuzzy msgid "" "*Clients* are responsible for generating individual weight updates for " @@ -24646,7 +24945,7 @@ msgstr "" "cette version améliorée du modèle à chaque *client*. Un cycle complet de " "mises à jour de poids s'appelle un *round*." -#: ../../source/tutorial-quickstart-ios.rst:21 +#: ../../source/tutorial-quickstart-ios.rst:26 #, fuzzy msgid "" "Now that we have a rough idea of what is going on, let's get started to " @@ -24657,17 +24956,17 @@ msgstr "" "commençons. Nous devons d'abord installer Flower. 
Tu peux le faire en " "exécutant :" -#: ../../source/tutorial-quickstart-ios.rst:27 +#: ../../source/tutorial-quickstart-ios.rst:33 msgid "Or Poetry:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-xgboost.rst:55 +#: ../../source/tutorial-quickstart-ios.rst:40 +#: ../../source/tutorial-quickstart-scikitlearn.rst:43 +#: ../../source/tutorial-quickstart-xgboost.rst:65 msgid "Flower Client" msgstr "Client de la fleur" -#: ../../source/tutorial-quickstart-ios.rst:36 +#: ../../source/tutorial-quickstart-ios.rst:42 msgid "" "Now that we have all our dependencies installed, let's run a simple " "distributed training using CoreML as our local training pipeline and " @@ -24676,88 +24975,88 @@ msgid "" "the Swift SDK. The client implementation can be seen below:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:72 +#: ../../source/tutorial-quickstart-ios.rst:80 msgid "" -"Let's create a new application project in Xcode and add :code:`flwr` as a" -" dependency in your project. For our application, we will store the logic" -" of our app in :code:`FLiOSModel.swift` and the UI elements in " -":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" -" in this quickstart. Please refer to the `full code example " +"Let's create a new application project in Xcode and add ``flwr`` as a " +"dependency in your project. For our application, we will store the logic " +"of our app in ``FLiOSModel.swift`` and the UI elements in " +"``ContentView.swift``. We will focus more on ``FLiOSModel.swift`` in this" +" quickstart. Please refer to the `full code example " "`_ to learn more " "about the app." 
msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:75 -msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +#: ../../source/tutorial-quickstart-ios.rst:86 +msgid "Import Flower and CoreML related packages in ``FLiOSModel.swift``:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:83 +#: ../../source/tutorial-quickstart-ios.rst:94 msgid "" "Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " "will be bundled inside the application during deployment to your iOS " "device. We need to pass the url to access mlmodel and run CoreML machine " "learning processes, it can be retrieved by calling the function " -":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " -"into :code:`MLBatchProvider` object. The preprocessing is done inside " -":code:`DataLoader.swift`." +"``Bundle.main.url``. For the MNIST dataset, we need to preprocess it into" +" ``MLBatchProvider`` object. The preprocessing is done inside " +"``DataLoader.swift``." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:99 +#: ../../source/tutorial-quickstart-ios.rst:112 msgid "" "Since CoreML does not allow the model parameters to be seen before " "training, and accessing the model parameters during or after the training" " can only be done by specifying the layer name, we need to know this " "information beforehand, through looking at the model specification, which" " are written as proto files. The implementation can be seen in " -":code:`MLModelInspect`." +"``MLModelInspect``." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:102 +#: ../../source/tutorial-quickstart-ios.rst:118 msgid "" "After we have all of the necessary information, let's create our Flower " "client." 
msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:117 +#: ../../source/tutorial-quickstart-ios.rst:133 msgid "" "Then start the Flower gRPC client and start communicating to the server " -"by passing our Flower client to the function :code:`startFlwrGRPC`." +"by passing our Flower client to the function ``startFlwrGRPC``." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:124 +#: ../../source/tutorial-quickstart-ios.rst:141 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " -"The attribute :code:`hostname` and :code:`port` tells the client which " -"server to connect to. This can be done by entering the hostname and port " -"in the application before clicking the start button to start the " -"federated learning process." +"That's it for the client. We only have to implement ``Client`` or call " +"the provided ``MLFlwrClient`` and call ``startFlwrGRPC()``. The attribute" +" ``hostname`` and ``port`` tells the client which server to connect to. " +"This can be done by entering the hostname and port in the application " +"before clicking the start button to start the federated learning process." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-scikitlearn.rst:167 -#: ../../source/tutorial-quickstart-xgboost.rst:341 +#: ../../source/tutorial-quickstart-ios.rst:148 +#: ../../source/tutorial-quickstart-scikitlearn.rst:179 +#: ../../source/tutorial-quickstart-xgboost.rst:358 msgid "Flower Server" msgstr "Serveur de Flower" -#: ../../source/tutorial-quickstart-ios.rst:131 +#: ../../source/tutorial-quickstart-ios.rst:150 +#, fuzzy msgid "" "For simple workloads we can start a Flower server and leave all the " "configuration possibilities at their default values. 
In a file named " -":code:`server.py`, import Flower and start the server:" +"``server.py``, import Flower and start the server:" msgstr "" "Pour les charges de travail simples, nous pouvons démarrer un serveur " "Flower et laisser toutes les possibilités de configuration à leurs " "valeurs par défaut. Dans un fichier nommé :code:`server.py`, importe " "Flower et démarre le serveur :" -#: ../../source/tutorial-quickstart-ios.rst:142 -#: ../../source/tutorial-quickstart-scikitlearn.rst:230 +#: ../../source/tutorial-quickstart-ios.rst:161 +#: ../../source/tutorial-quickstart-scikitlearn.rst:254 msgid "Train the model, federated!" msgstr "Entraîne le modèle, fédéré !" -#: ../../source/tutorial-quickstart-ios.rst:144 -#: ../../source/tutorial-quickstart-xgboost.rst:567 +#: ../../source/tutorial-quickstart-ios.rst:163 +#: ../../source/tutorial-quickstart-xgboost.rst:590 msgid "" "With both client and server ready, we can now run everything and see " "federated learning in action. FL systems usually have a server and " @@ -24768,7 +25067,7 @@ msgstr "" "généralement un serveur et plusieurs clients. Nous devons donc commencer " "par démarrer le serveur :" -#: ../../source/tutorial-quickstart-ios.rst:152 +#: ../../source/tutorial-quickstart-ios.rst:171 msgid "" "Once the server is running we can start the clients in different " "terminals. Build and run the client through your Xcode, one through Xcode" @@ -24778,13 +25077,13 @@ msgid "" "simulator-or-on-a-device>`_." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:156 +#: ../../source/tutorial-quickstart-ios.rst:177 #, fuzzy msgid "" "Congratulations! You've successfully built and run your first federated " "learning system in your ios device. The full `source code " "`_ for this " -"example can be found in :code:`examples/ios`." +"example can be found in ``examples/ios``." msgstr "" "Félicitations ! Tu as réussi à construire et à faire fonctionner ton " "premier système d'apprentissage fédéré. 
Le code source complet " @@ -24798,11 +25097,11 @@ msgid "" "with Jax to train a linear regression model on a scikit-learn dataset." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:5 +#: ../../source/tutorial-quickstart-jax.rst:4 msgid "Quickstart JAX" msgstr "Démarrage rapide de JAX" -#: ../../source/tutorial-quickstart-jax.rst:10 +#: ../../source/tutorial-quickstart-jax.rst:9 #, fuzzy msgid "" "This tutorial will show you how to use Flower to build a federated " @@ -24829,41 +25128,44 @@ msgstr "" " Ensuite, nous nous appuyons sur le code d'entraînement centralisé pour " "exécuter l'entraînement de manière fédérée." -#: ../../source/tutorial-quickstart-jax.rst:16 +#: ../../source/tutorial-quickstart-jax.rst:20 +#, fuzzy msgid "" "Before we start building our JAX example, we need install the packages " -":code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:" +"``jax``, ``jaxlib``, ``scikit-learn``, and ``flwr``:" msgstr "" "Avant de commencer à construire notre exemple JAX, nous devons installer " "les paquets :code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, et " ":code:`flwr` :" -#: ../../source/tutorial-quickstart-jax.rst:24 +#: ../../source/tutorial-quickstart-jax.rst:28 msgid "Linear Regression with JAX" msgstr "Régression linéaire avec JAX" -#: ../../source/tutorial-quickstart-jax.rst:26 +#: ../../source/tutorial-quickstart-jax.rst:30 +#, fuzzy msgid "" "We begin with a brief description of the centralized training code based " -"on a :code:`Linear Regression` model. If you want a more in-depth " -"explanation of what's going on then have a look at the official `JAX " -"documentation `_." +"on a ``Linear Regression`` model. If you want a more in-depth explanation" +" of what's going on then have a look at the official `JAX documentation " +"`_." msgstr "" "Nous commençons par une brève description du code d'entraînement " "centralisé basé sur un modèle :code:`Régression linéaire`. 
Si tu veux une" " explication plus approfondie de ce qui se passe, jette un coup d'œil à " "la documentation officielle `JAX `_." -#: ../../source/tutorial-quickstart-jax.rst:29 +#: ../../source/tutorial-quickstart-jax.rst:34 +#, fuzzy msgid "" -"Let's create a new file called :code:`jax_training.py` with all the " +"Let's create a new file called ``jax_training.py`` with all the " "components required for a traditional (centralized) linear regression " -"training. First, the JAX packages :code:`jax` and :code:`jaxlib` need to " -"be imported. In addition, we need to import :code:`sklearn` since we use " -":code:`make_regression` for the dataset and :code:`train_test_split` to " -"split the dataset into a training and test set. You can see that we do " -"not yet import the :code:`flwr` package for federated learning. This will" -" be done later." +"training. First, the JAX packages ``jax`` and ``jaxlib`` need to be " +"imported. In addition, we need to import ``sklearn`` since we use " +"``make_regression`` for the dataset and ``train_test_split`` to split the" +" dataset into a training and test set. You can see that we do not yet " +"import the ``flwr`` package for federated learning. This will be done " +"later." msgstr "" "Créons un nouveau fichier appelé :code:`jax_training.py` avec tous les " "composants nécessaires pour un apprentissage traditionnel (centralisé) de" @@ -24875,29 +25177,30 @@ msgstr "" "n'avons pas encore importé le paquet :code:`flwr` pour l'apprentissage " "fédéré, ce qui sera fait plus tard." -#: ../../source/tutorial-quickstart-jax.rst:43 -msgid "" -"The :code:`load_data()` function loads the mentioned training and test " -"sets." +#: ../../source/tutorial-quickstart-jax.rst:51 +#, fuzzy +msgid "The ``load_data()`` function loads the mentioned training and test sets." msgstr "" "La fonction :code:`load_data()` charge les ensembles d'entraînement et de" " test mentionnés." 
-#: ../../source/tutorial-quickstart-jax.rst:53 +#: ../../source/tutorial-quickstart-jax.rst:63 +#, fuzzy msgid "" -"The model architecture (a very simple :code:`Linear Regression` model) is" -" defined in :code:`load_model()`." +"The model architecture (a very simple ``Linear Regression`` model) is " +"defined in ``load_model()``." msgstr "" "L'architecture du modèle (un modèle :code:`Régression linéaire` très " "simple) est définie dans :code:`load_model()`." -#: ../../source/tutorial-quickstart-jax.rst:65 +#: ../../source/tutorial-quickstart-jax.rst:73 +#, fuzzy msgid "" -"We now need to define the training (function :code:`train()`), which " -"loops over the training set and measures the loss (function " -":code:`loss_fn()`) for each batch of training examples. The loss function" -" is separate since JAX takes derivatives with a :code:`grad()` function " -"(defined in the :code:`main()` function and called in :code:`train()`)." +"We now need to define the training (function ``train()``), which loops " +"over the training set and measures the loss (function ``loss_fn()``) for " +"each batch of training examples. The loss function is separate since JAX " +"takes derivatives with a ``grad()`` function (defined in the ``main()`` " +"function and called in ``train()``)." msgstr "" "Nous devons maintenant définir l'entraînement (fonction :code:`train()`)," " qui boucle sur l'ensemble d'entraînement et mesure la perte (fonction " @@ -24906,22 +25209,24 @@ msgstr "" ":code:`grad()` (définie dans la fonction :code:`main()` et appelée dans " ":code:`train()`)." -#: ../../source/tutorial-quickstart-jax.rst:83 +#: ../../source/tutorial-quickstart-jax.rst:95 +#, fuzzy msgid "" -"The evaluation of the model is defined in the function " -":code:`evaluation()`. The function takes all test examples and measures " -"the loss of the linear regression model." +"The evaluation of the model is defined in the function ``evaluation()``. 
" +"The function takes all test examples and measures the loss of the linear " +"regression model." msgstr "" "L'évaluation du modèle est définie dans la fonction :code:`evaluation()`." " La fonction prend tous les exemples de test et mesure la perte du modèle" " de régression linéaire." -#: ../../source/tutorial-quickstart-jax.rst:94 +#: ../../source/tutorial-quickstart-jax.rst:107 +#, fuzzy msgid "" "Having defined the data loading, model architecture, training, and " "evaluation we can put everything together and train our model using JAX. " -"As already mentioned, the :code:`jax.grad()` function is defined in " -":code:`main()` and passed to :code:`train()`." +"As already mentioned, the ``jax.grad()`` function is defined in " +"``main()`` and passed to ``train()``." msgstr "" "Après avoir défini le chargement des données, l'architecture du modèle, " "l'entraînement et l'évaluation, nous pouvons tout assembler et entraîner " @@ -24929,13 +25234,13 @@ msgstr "" "fonction :code:`jax.grad()` est définie dans :code:`main()` et transmise " "à :code:`train()`." -#: ../../source/tutorial-quickstart-jax.rst:111 +#: ../../source/tutorial-quickstart-jax.rst:126 msgid "You can now run your (centralized) JAX linear regression workload:" msgstr "" "Tu peux maintenant exécuter ta charge de travail (centralisée) de " "régression linéaire JAX :" -#: ../../source/tutorial-quickstart-jax.rst:117 +#: ../../source/tutorial-quickstart-jax.rst:132 msgid "" "So far this should all look fairly familiar if you've used JAX before. " "Let's take the next step and use what we've built to create a simple " @@ -24946,20 +25251,21 @@ msgstr "" "avons construit pour créer un simple système d'apprentissage fédéré " "composé d'un serveur et de deux clients." 
-#: ../../source/tutorial-quickstart-jax.rst:121 +#: ../../source/tutorial-quickstart-jax.rst:137 msgid "JAX meets Flower" msgstr "JAX rencontre Flower" -#: ../../source/tutorial-quickstart-jax.rst:123 +#: ../../source/tutorial-quickstart-jax.rst:139 +#, fuzzy msgid "" "The concept of federating an existing workload is always the same and " "easy to understand. We have to start a *server* and then use the code in " -":code:`jax_training.py` for the *clients* that are connected to the " -"*server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server*, which averages all received " -"parameter updates. This describes one round of the federated learning " -"process, and we repeat this for multiple rounds." +"``jax_training.py`` for the *clients* that are connected to the *server*." +" The *server* sends model parameters to the clients. The *clients* run " +"the training and update the parameters. The updated parameters are sent " +"back to the *server*, which averages all received parameter updates. This" +" describes one round of the federated learning process, and we repeat " +"this for multiple rounds." msgstr "" "Le concept de fédération d'une charge de travail existante est toujours " "le même et facile à comprendre. Nous devons démarrer un *serveur*, puis " @@ -24971,12 +25277,13 @@ msgstr "" "un tour du processus d'apprentissage fédéré, et nous répétons cette " "opération pour plusieurs tours." -#: ../../source/tutorial-quickstart-jax.rst:145 +#: ../../source/tutorial-quickstart-jax.rst:167 +#, fuzzy msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined JAX training in :code:`jax_training.py`. 
Our" -" *client* needs to import :code:`flwr`, but also :code:`jax` and " -":code:`jaxlib` to update the parameters on our JAX model:" +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined JAX training in ``jax_training.py``. Our " +"*client* needs to import ``flwr``, but also ``jax`` and ``jaxlib`` to " +"update the parameters on our JAX model:" msgstr "" "Enfin, nous allons définir la logique de notre *client* dans " ":code:`client.py` et nous appuyer sur la formation JAX définie " @@ -24984,18 +25291,18 @@ msgstr "" ":code:`flwr`, mais aussi :code:`jax` et :code:`jaxlib` pour mettre à jour" " les paramètres de notre modèle JAX :" -#: ../../source/tutorial-quickstart-jax.rst:160 +#: ../../source/tutorial-quickstart-jax.rst:182 +#, fuzzy msgid "" "Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`FlowerClient`. :code:`NumPyClient` is slightly " -"easier to implement than :code:`Client` if you use a framework with good " -"NumPy interoperability (like JAX) because it avoids some of the " -"boilerplate that would otherwise be necessary. :code:`FlowerClient` needs" -" to implement four methods, two methods for getting/setting model " -"parameters, one method for training the model, and one method for testing" -" the model:" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``FlowerClient``. ``NumPyClient`` is slightly easier to implement" +" than ``Client`` if you use a framework with good NumPy interoperability " +"(like JAX) because it avoids some of the boilerplate that would otherwise" +" be necessary. 
``FlowerClient`` needs to implement four methods, two " +"methods for getting/setting model parameters, one method for training the" +" model, and one method for testing the model:" msgstr "" "L'implémentation d'un *client* Flower signifie essentiellement " "l'implémentation d'une sous-classe de :code:`flwr.client.Client` ou " @@ -25009,43 +25316,46 @@ msgstr "" "paramètres du modèle, une méthode pour former le modèle, et une méthode " "pour tester le modèle :" -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid ":code:`set_parameters (optional)`" +#: ../../source/tutorial-quickstart-jax.rst:194 +#, fuzzy +msgid "``set_parameters (optional)``" msgstr ":code:`set_parameters (optional)`" -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid "transform parameters to NumPy :code:`ndarray`'s" +#: ../../source/tutorial-quickstart-jax.rst:193 +#, fuzzy +msgid "transform parameters to NumPy ``ndarray``'s" msgstr "transforme les paramètres en NumPy :code:`ndarray`'s" -#: ../../source/tutorial-quickstart-jax.rst:174 +#: ../../source/tutorial-quickstart-jax.rst:203 msgid "get the updated local model parameters and return them to the server" msgstr "" "récupère les paramètres du modèle local mis à jour et les renvoie au " "serveur" -#: ../../source/tutorial-quickstart-jax.rst:178 +#: ../../source/tutorial-quickstart-jax.rst:208 msgid "return the local loss to the server" msgstr "renvoie la perte locale au serveur" -#: ../../source/tutorial-quickstart-jax.rst:180 +#: ../../source/tutorial-quickstart-jax.rst:210 +#, fuzzy msgid "" "The challenging part is to transform the JAX model parameters from " -":code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with" -" `NumPyClient`." +"``DeviceArray`` to ``NumPy ndarray`` to make them compatible with " +"`NumPyClient`." msgstr "" "La partie la plus difficile consiste à transformer les paramètres du " "modèle JAX de :code:`DeviceArray` en :code:`NumPy ndarray` pour les " "rendre compatibles avec `NumPyClient`." 
-#: ../../source/tutorial-quickstart-jax.rst:182 +#: ../../source/tutorial-quickstart-jax.rst:213 +#, fuzzy msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`evaluate()` previously " -"defined in :code:`jax_training.py`. So what we really do here is we tell " -"Flower through our :code:`NumPyClient` subclass which of our already " -"defined functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``evaluate()`` previously defined in " +"``jax_training.py``. So what we really do here is we tell Flower through " +"our ``NumPyClient`` subclass which of our already defined functions to " +"call for training and evaluation. We included type annotations to give " +"you a better understanding of the data types that get passed around." msgstr "" "Les deux méthodes :code:`NumPyClient` :code:`fit` et :code:`evaluate` " "utilisent les fonctions :code:`train()` et :code:`evaluate()` définies " @@ -25056,11 +25366,11 @@ msgstr "" "annotations de type pour te donner une meilleure compréhension des types " "de données qui sont transmis." -#: ../../source/tutorial-quickstart-jax.rst:251 +#: ../../source/tutorial-quickstart-jax.rst:286 msgid "Having defined the federation process, we can run it." msgstr "Après avoir défini le processus de fédération, nous pouvons l'exécuter." -#: ../../source/tutorial-quickstart-jax.rst:280 +#: ../../source/tutorial-quickstart-jax.rst:315 msgid "" "in each window (make sure that the server is still running before you do " "so) and see your JAX project run federated learning across two clients. " @@ -25070,7 +25380,7 @@ msgstr "" "d'exécution avant de le faire) et tu verras que ton projet JAX exécute " "l'apprentissage fédéré sur deux clients. 
Félicitations !" -#: ../../source/tutorial-quickstart-jax.rst:285 +#: ../../source/tutorial-quickstart-jax.rst:321 #, fuzzy msgid "" "The source code of this example was improved over time and can be found " @@ -25084,7 +25394,7 @@ msgstr "" "Notre exemple est quelque peu simplifié à l'extrême car les deux clients " "chargent le même jeu de données." -#: ../../source/tutorial-quickstart-jax.rst:288 +#: ../../source/tutorial-quickstart-jax.rst:325 msgid "" "You're now prepared to explore this topic further. How about using a more" " sophisticated model or using a different dataset? How about adding more " @@ -25094,12 +25404,12 @@ msgstr "" " modèle plus sophistiqué ou un ensemble de données différent ? Pourquoi " "ne pas ajouter d'autres clients ?" -#: ../../source/tutorial-quickstart-mlx.rst:5 +#: ../../source/tutorial-quickstart-mlx.rst:4 #, fuzzy msgid "Quickstart MLX" msgstr "Démarrage rapide de JAX" -#: ../../source/tutorial-quickstart-mlx.rst:7 +#: ../../source/tutorial-quickstart-mlx.rst:6 #, fuzzy msgid "" "In this federated learning tutorial we will learn how to train simple MLP" @@ -25111,7 +25421,7 @@ msgstr "" "tout exécuter au sein d'un `virtualenv `_." -#: ../../source/tutorial-quickstart-mlx.rst:12 +#: ../../source/tutorial-quickstart-mlx.rst:10 msgid "" "Let's use `flwr new` to create a complete Flower+MLX project. It will " "generate all the files needed to run, by default with the Simulation " @@ -25123,24 +25433,24 @@ msgid "" "api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner>`_." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:27 +#: ../../source/tutorial-quickstart-mlx.rst:25 msgid "" "Then, run the command below. 
You will be prompted to select of the " "available templates (choose ``MLX``), give a name to your project, and " "type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:57 +#: ../../source/tutorial-quickstart-mlx.rst:53 msgid "To run the project do:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:106 +#: ../../source/tutorial-quickstart-mlx.rst:102 msgid "" "You can also override the parameters defined in " "``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:122 +#: ../../source/tutorial-quickstart-mlx.rst:116 msgid "" "We will use `Flower Datasets `_ to " "easily download and partition the `MNIST` dataset. In this example you'll" @@ -25151,20 +25461,20 @@ msgid "" "api/flwr_datasets.partitioner.html>`_ available in Flower Datasets:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:166 +#: ../../source/tutorial-quickstart-mlx.rst:157 msgid "" "We define the model as in the `centralized MLX example " "`_, it's a " "simple MLP:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:190 +#: ../../source/tutorial-quickstart-mlx.rst:180 msgid "" "We also define some utility functions to test our model and to iterate " "over batches." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:212 +#: ../../source/tutorial-quickstart-mlx.rst:201 msgid "" "The main changes we have to make to use `MLX` with `Flower` will be found" " in the ``get_params()`` and ``set_params()`` functions. Indeed, MLX " @@ -25173,17 +25483,17 @@ msgid "" "messages to work)." 
msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:218 +#: ../../source/tutorial-quickstart-mlx.rst:206 msgid "The way MLX stores its parameters is as follows:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:231 +#: ../../source/tutorial-quickstart-mlx.rst:219 msgid "" "Therefore, to get our list of ``np.array`` objects, we need to extract " "each array and convert them into a NumPy array:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:240 +#: ../../source/tutorial-quickstart-mlx.rst:228 msgid "" "For the ``set_params()`` function, we perform the reverse operation. We " "receive a list of NumPy arrays and want to convert them into MLX " @@ -25191,24 +25501,24 @@ msgid "" "them to the `weight` and `bias` keys of each layer dict:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:255 +#: ../../source/tutorial-quickstart-mlx.rst:243 msgid "" "The rest of the functionality is directly inspired by the centralized " "case. The ``fit()`` method in the client trains the model using the local" " dataset:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:272 +#: ../../source/tutorial-quickstart-mlx.rst:259 msgid "" "Here, after updating the parameters, we perform the training as in the " "centralized case, and return the new parameters." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:275 +#: ../../source/tutorial-quickstart-mlx.rst:262 msgid "And for the ``evaluate()`` method of the client:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:285 +#: ../../source/tutorial-quickstart-mlx.rst:272 msgid "" "We also begin by updating the parameters with the ones sent by the " "server, and then we compute the loss and accuracy using the functions " @@ -25216,12 +25526,12 @@ msgid "" "the `MLP` model as well as other components such as the optimizer." 
msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:290 +#: ../../source/tutorial-quickstart-mlx.rst:277 #, fuzzy msgid "Putting everything together we have:" msgstr "Tout assembler" -#: ../../source/tutorial-quickstart-mlx.rst:344 +#: ../../source/tutorial-quickstart-mlx.rst:331 msgid "" "Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " "defined above by means of a ``client_fn()`` callback. Note that " @@ -25232,7 +25542,7 @@ msgid "" "method." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:378 +#: ../../source/tutorial-quickstart-mlx.rst:363 msgid "" "To construct a ``ServerApp``, we define a ``server_fn()`` callback with " "an identical signature to that of ``client_fn()``, but the return type is" @@ -25243,15 +25553,15 @@ msgid "" "``FedAvg`` strategy." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:402 -#: ../../source/tutorial-quickstart-pytorch.rst:360 -#: ../../source/tutorial-quickstart-tensorflow.rst:279 +#: ../../source/tutorial-quickstart-mlx.rst:386 +#: ../../source/tutorial-quickstart-pytorch.rst:344 +#: ../../source/tutorial-quickstart-tensorflow.rst:266 msgid "" "Congratulations! You've successfully built and run your first federated " "learning system." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:407 +#: ../../source/tutorial-quickstart-mlx.rst:390 #, fuzzy msgid "" "Check the `source code `_." -#: ../../source/tutorial-quickstart-pytorch.rst:12 +#: ../../source/tutorial-quickstart-pytorch.rst:11 msgid "" "Let's use `flwr new` to create a complete Flower+PyTorch project. It will" " generate all the files needed to run, by default with the Flower " @@ -25319,14 +25629,14 @@ msgid "" "api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner>`_." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:27 +#: ../../source/tutorial-quickstart-pytorch.rst:26 msgid "" "Then, run the command below. 
You will be prompted to select one of the " "available templates (choose ``PyTorch``), give a name to your project, " "and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:121 +#: ../../source/tutorial-quickstart-pytorch.rst:117 msgid "" "This tutorial uses `Flower Datasets `_ " "to easily download and partition the `CIFAR-10` dataset. In this example " @@ -25340,13 +25650,13 @@ msgid "" " that correspond to their data partition." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:159 +#: ../../source/tutorial-quickstart-pytorch.rst:152 msgid "" "We defined a simple Convolutional Neural Network (CNN), but feel free to " "replace it with a more sophisticated model if you'd like:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:184 +#: ../../source/tutorial-quickstart-pytorch.rst:177 msgid "" "In addition to defining the model architecture, we also include two " "utility functions to perform both training (i.e. ``train()``) and " @@ -25359,7 +25669,7 @@ msgid "" "training or evaluation:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:236 +#: ../../source/tutorial-quickstart-pytorch.rst:226 msgid "" "The main changes we have to make to use `PyTorch` with `Flower` will be " "found in the ``get_weights()`` and ``set_weights()`` functions. In " @@ -25369,7 +25679,7 @@ msgid "" "PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:294 +#: ../../source/tutorial-quickstart-pytorch.rst:282 msgid "" "Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " "defined above by means of a ``client_fn()`` callback. Note that the " @@ -25380,7 +25690,7 @@ msgid "" "additioinal hyperparameters in ``pyproject.toml`` and access them here." 
msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:323 +#: ../../source/tutorial-quickstart-pytorch.rst:309 msgid "" "To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" " identical signature to that of ``client_fn()`` but the return type is " @@ -25394,7 +25704,7 @@ msgid "" "``pyproject.toml``." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:365 +#: ../../source/tutorial-quickstart-pytorch.rst:348 #, fuzzy msgid "" "Check the `source code `_ de cet exemple se trouve dans :code:`examples" "/quickstart-mxnet`." -#: ../../source/tutorial-quickstart-pytorch.rst:372 -#: ../../source/tutorial-quickstart-tensorflow.rst:295 +#: ../../source/tutorial-quickstart-pytorch.rst:354 +#: ../../source/tutorial-quickstart-tensorflow.rst:278 #, fuzzy msgid "Video tutorial" msgstr "Tutoriel" -#: ../../source/tutorial-quickstart-pytorch.rst:376 +#: ../../source/tutorial-quickstart-pytorch.rst:358 msgid "" "The video shown below shows how to setup a PyTorch + Flower project using" " our previously recommended APIs. A new video tutorial will be released " "that shows the new APIs (as the content above does)" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:4 msgid "Quickstart PyTorch Lightning" msgstr "Démarrage rapide de PyTorch Lightning" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:7 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:6 #, fuzzy msgid "" "In this federated learning tutorial we will learn how to train an " @@ -25436,13 +25746,13 @@ msgstr "" "tout exécuter au sein d'un `virtualenv `_." 
-#: ../../source/tutorial-quickstart-pytorch-lightning.rst:20 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:19 msgid "" "This will create a new directory called `quickstart-pytorch-lightning` " "containing the following files:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:43 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:42 msgid "" "By default, Flower Simulation Engine will be started and it will create a" " federation of 4 nodes using `FedAvg `_." -#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +#: ../../source/tutorial-quickstart-scikitlearn.rst:15 msgid "" "Our example consists of one *server* and two *clients* all having the " "same model." @@ -25508,7 +25819,7 @@ msgstr "" "Notre exemple consiste en un *serveur* et deux *clients* ayant tous le " "même modèle." -#: ../../source/tutorial-quickstart-scikitlearn.rst:16 +#: ../../source/tutorial-quickstart-scikitlearn.rst:17 msgid "" "*Clients* are responsible for generating individual model parameter " "updates for the model based on their local datasets. These updates are " @@ -25524,7 +25835,7 @@ msgstr "" "version améliorée du modèle à chaque *client*. Un cycle complet de mises " "à jour des paramètres s'appelle un *round*." -#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +#: ../../source/tutorial-quickstart-scikitlearn.rst:23 msgid "" "Now that we have a rough idea of what is going on, let's get started. We " "first need to install Flower. You can do this by running:" @@ -25533,22 +25844,23 @@ msgstr "" "commençons. Nous devons d'abord installer Flower. 
Tu peux le faire en " "lançant :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:26 +#: ../../source/tutorial-quickstart-scikitlearn.rst:30 #, fuzzy msgid "Since we want to use scikit-learn, let's go ahead and install it:" msgstr "Puisque nous voulons utiliser scikt-learn, allons-y et installons-le :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:32 +#: ../../source/tutorial-quickstart-scikitlearn.rst:36 msgid "Or simply install all dependencies using Poetry:" msgstr "Ou installe simplement toutes les dépendances à l'aide de Poetry :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:42 +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 +#, fuzzy msgid "" "Now that we have all our dependencies installed, let's run a simple " "distributed training with two clients and one server. However, before " "setting up the client and server, we will define all functionalities that" -" we need for our federated learning setup within :code:`utils.py`. The " -":code:`utils.py` contains different functions defining all the machine " +" we need for our federated learning setup within ``utils.py``. 
The " +"``utils.py`` contains different functions defining all the machine " "learning basics:" msgstr "" "Maintenant que toutes nos dépendances sont installées, exécutons une " @@ -25559,40 +25871,45 @@ msgstr "" "contient différentes fonctions définissant toutes les bases de " "l'apprentissage automatique :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:45 -msgid ":code:`get_model_parameters()`" +#: ../../source/tutorial-quickstart-scikitlearn.rst:51 +#, fuzzy +msgid "``get_model_parameters()``" msgstr ":code:`get_model_parameters()`" -#: ../../source/tutorial-quickstart-scikitlearn.rst:46 -msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +#, fuzzy +msgid "Returns the parameters of a ``sklearn`` LogisticRegression model" msgstr "" "Renvoie les paramètres d'un modèle de régression logistique " ":code:`sklearn`" -#: ../../source/tutorial-quickstart-scikitlearn.rst:47 -msgid ":code:`set_model_params()`" +#: ../../source/tutorial-quickstart-scikitlearn.rst:53 +#, fuzzy +msgid "``set_model_params()``" msgstr ":code:`set_model_params()`" -#: ../../source/tutorial-quickstart-scikitlearn.rst:48 +#: ../../source/tutorial-quickstart-scikitlearn.rst:54 #, fuzzy -msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" +msgid "Sets the parameters of a ``sklearn`` LogisticRegression model" msgstr "Définit les paramètres d'un modèle de régression logistique :code:`sklean`" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid ":code:`set_initial_params()`" +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +#, fuzzy +msgid "``set_initial_params()``" msgstr ":code:`set_initial_params()`" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 msgid "Initializes the model parameters that the Flower server will ask for" msgstr "Initialise les paramètres du modèle que le serveur de Flower demandera" -#: 
../../source/tutorial-quickstart-scikitlearn.rst:52 +#: ../../source/tutorial-quickstart-scikitlearn.rst:58 +#, fuzzy msgid "" -"Please check out :code:`utils.py` `here " +"Please check out ``utils.py`` `here " "`_ for more details. The pre-defined functions are used in" -" the :code:`client.py` and imported. The :code:`client.py` also requires " -"to import several packages such as Flower and scikit-learn:" +" the ``client.py`` and imported. The ``client.py`` also requires to " +"import several packages such as Flower and scikit-learn:" msgstr "" "Tu peux consulter :code:`utils.py` `ici " "`_. The " -":code:`FederatedDataset.load_partition()` method loads the partitioned " -"training set for each partition ID defined in the :code:`--partition-id` " +"``FederatedDataset.load_partition()`` method loads the partitioned " +"training set for each partition ID defined in the ``--partition-id`` " "argument." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:95 +#: ../../source/tutorial-quickstart-scikitlearn.rst:106 +#, fuzzy msgid "" "Next, the logistic regression model is defined and initialized with " -":code:`utils.set_initial_params()`." +"``utils.set_initial_params()``." msgstr "" "Ensuite, le modèle de régression logistique est défini et initialisé avec" " :code:`utils.set_initial_params()`." -#: ../../source/tutorial-quickstart-scikitlearn.rst:107 +#: ../../source/tutorial-quickstart-scikitlearn.rst:119 +#, fuzzy msgid "" "The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to fit the logistic regression we defined earlier)." +"``Client``. When the server selects a particular client for training, it " +"sends training instructions over the network. 
The client receives those " +"instructions and calls one of the ``Client`` methods to run your code " +"(i.e., to fit the logistic regression we defined earlier)." msgstr "" "Le serveur Flower interagit avec les clients par le biais d'une interface" " appelée :code:`Client`. Lorsque le serveur sélectionne un client " @@ -25635,13 +25954,13 @@ msgstr "" "méthodes :code:`Client` pour exécuter ton code (c'est-à-dire pour ajuster" " la régression logistique que nous avons définie plus tôt)." -#: ../../source/tutorial-quickstart-scikitlearn.rst:113 +#: ../../source/tutorial-quickstart-scikitlearn.rst:124 +#, fuzzy msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses scikit-learn. Implementing :code:`NumPyClient` usually " -"means defining the following methods (:code:`set_parameters` is optional " -"though):" +"Flower provides a convenience class called ``NumPyClient`` which makes it" +" easier to implement the ``Client`` interface when your workload uses " +"scikit-learn. 
Implementing ``NumPyClient`` usually means defining the " +"following methods (``set_parameters`` is optional though):" msgstr "" "Flower fournit une classe de commodité appelée :code:`NumPyClient` qui " "facilite la mise en œuvre de l'interface :code:`Client` lorsque ta charge" @@ -25649,15 +25968,16 @@ msgstr "" "signifie généralement définir les méthodes suivantes " "(:code:`set_parameters` est cependant facultatif) :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:119 +#: ../../source/tutorial-quickstart-scikitlearn.rst:130 msgid "return the model weight as a list of NumPy ndarrays" msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 -msgid ":code:`set_parameters` (optional)" +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 +#, fuzzy +msgid "``set_parameters`` (optional)" msgstr ":code:`set_parameters` (optionnel)" -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 msgid "" "update the local model weights with the parameters received from the " "server" @@ -25665,51 +25985,53 @@ msgstr "" "mettre à jour les poids du modèle local avec les paramètres reçus du " "serveur" -#: ../../source/tutorial-quickstart-scikitlearn.rst:122 -msgid "is directly imported with :code:`utils.set_model_params()`" +#: ../../source/tutorial-quickstart-scikitlearn.rst:133 +#, fuzzy +msgid "is directly imported with ``utils.set_model_params()``" msgstr "est directement importé avec :code:`utils.set_model_params()`" -#: ../../source/tutorial-quickstart-scikitlearn.rst:124 +#: ../../source/tutorial-quickstart-scikitlearn.rst:135 msgid "set the local model weights" msgstr "fixe les poids du modèle local" -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 msgid "train the local model" msgstr "entraîne le modèle local" -#: 
../../source/tutorial-quickstart-scikitlearn.rst:126 +#: ../../source/tutorial-quickstart-scikitlearn.rst:137 #, fuzzy msgid "return the updated local model weights" msgstr "recevoir les poids du modèle local mis à jour" -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 +#: ../../source/tutorial-quickstart-scikitlearn.rst:139 msgid "test the local model" msgstr "teste le modèle local" -#: ../../source/tutorial-quickstart-scikitlearn.rst:130 +#: ../../source/tutorial-quickstart-scikitlearn.rst:141 msgid "The methods can be implemented in the following way:" msgstr "Les méthodes peuvent être mises en œuvre de la manière suivante :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:153 +#: ../../source/tutorial-quickstart-scikitlearn.rst:163 +#, fuzzy msgid "" -"We can now create an instance of our class :code:`MnistClient` and add " -"one line to actually run this client:" +"We can now create an instance of our class ``MnistClient`` and add one " +"line to actually run this client:" msgstr "" "Nous pouvons maintenant créer une instance de notre classe " ":code:`MnistClient` et ajouter une ligne pour exécuter ce client :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:160 +#: ../../source/tutorial-quickstart-scikitlearn.rst:170 #, fuzzy msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" -" the client which server to connect to. In our case we can run the server" -" and the client on the same machine, therefore we use " -":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we pass to the client." +"That's it for the client. 
We only have to implement ``Client`` or " +"``NumPyClient`` and call ``fl.client.start_client()``. If you implement a" +" client of type ``NumPyClient`` you'll need to first call its " +"``to_client()`` method. The string ``\"0.0.0.0:8080\"`` tells the client " +"which server to connect to. In our case we can run the server and the " +"client on the same machine, therefore we use ``\"0.0.0.0:8080\"``. If we " +"run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" pass to the client." msgstr "" "C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou" " :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. La " @@ -25721,7 +26043,7 @@ msgstr "" "machines différentes, tout ce qui doit changer est :code:`server_address`" " que nous transmettons au client." -#: ../../source/tutorial-quickstart-scikitlearn.rst:169 +#: ../../source/tutorial-quickstart-scikitlearn.rst:181 msgid "" "The following Flower server is a little bit more advanced and returns an " "evaluation function for the server-side evaluation. First, we import " @@ -25732,19 +26054,20 @@ msgstr "" " à nouveau toutes les bibliothèques requises telles que Flower et scikit-" "learn." -#: ../../source/tutorial-quickstart-scikitlearn.rst:172 -msgid ":code:`server.py`, import Flower and start the server:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +#, fuzzy +msgid "``server.py``, import Flower and start the server:" msgstr ":code:`server.py`, importe Flower et démarre le serveur :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +#: ../../source/tutorial-quickstart-scikitlearn.rst:198 #, fuzzy msgid "" -"The number of federated learning rounds is set in :code:`fit_round()` and" -" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " -"function is called after each federated learning round and gives you " -"information about loss and accuracy. 
Note that we also make use of Flower" -" Datasets here to load the test split of the MNIST dataset for server-" -"side evaluation." +"The number of federated learning rounds is set in ``fit_round()`` and the" +" evaluation is defined in ``get_evaluate_fn()``. The evaluation function " +"is called after each federated learning round and gives you information " +"about loss and accuracy. Note that we also make use of Flower Datasets " +"here to load the test split of the MNIST dataset for server-side " +"evaluation." msgstr "" "Le nombre de tours d'apprentissage fédéré est défini dans " ":code:`fit_round()` et l'évaluation est définie dans " @@ -25752,15 +26075,16 @@ msgstr "" "chaque tour d'apprentissage fédéré et te donne des informations sur la " "perte et la précision." -#: ../../source/tutorial-quickstart-scikitlearn.rst:213 +#: ../../source/tutorial-quickstart-scikitlearn.rst:228 +#, fuzzy msgid "" -"The :code:`main` contains the server-side parameter initialization " -":code:`utils.set_initial_params()` as well as the aggregation strategy " -":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " +"The ``main`` contains the server-side parameter initialization " +"``utils.set_initial_params()`` as well as the aggregation strategy " +"``fl.server.strategy:FedAvg()``. The strategy is the default one, " "federated averaging (or FedAvg), with two clients and evaluation after " "each federated learning round. The server can be started with the command" -" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." +" ``fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))``." 
msgstr "" "Le :code:`main` contient l'initialisation des paramètres côté serveur " ":code:`utils.set_initial_params()` ainsi que la stratégie d'agrégation " @@ -25770,7 +26094,7 @@ msgstr "" " commande :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " "strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." -#: ../../source/tutorial-quickstart-scikitlearn.rst:232 +#: ../../source/tutorial-quickstart-scikitlearn.rst:256 msgid "" "With both client and server ready, we can now run everything and see " "federated learning in action. Federated learning systems usually have a " @@ -25782,8 +26106,8 @@ msgstr "" "fédéré ont généralement un serveur et plusieurs clients. Nous devons donc" " commencer par lancer le serveur :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:239 -#: ../../source/tutorial-quickstart-xgboost.rst:575 +#: ../../source/tutorial-quickstart-scikitlearn.rst:264 +#: ../../source/tutorial-quickstart-xgboost.rst:598 msgid "" "Once the server is running we can start the clients in different " "terminals. Open a new terminal and start the first client:" @@ -25792,13 +26116,13 @@ msgstr "" "dans différents terminaux. Ouvre un nouveau terminal et démarre le " "premier client :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:246 -#: ../../source/tutorial-quickstart-xgboost.rst:582 +#: ../../source/tutorial-quickstart-scikitlearn.rst:271 +#: ../../source/tutorial-quickstart-xgboost.rst:605 msgid "Open another terminal and start the second client:" msgstr "Ouvre un autre terminal et démarre le deuxième client :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:252 -#: ../../source/tutorial-quickstart-xgboost.rst:588 +#: ../../source/tutorial-quickstart-scikitlearn.rst:277 +#: ../../source/tutorial-quickstart-xgboost.rst:611 msgid "" "Each client will have its own dataset. 
You should now see how the " "training does in the very first terminal (the one that started the " @@ -25808,13 +26132,14 @@ msgstr "" "voir comment la formation se déroule dans le tout premier terminal (celui" " qui a démarré le serveur) :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:286 +#: ../../source/tutorial-quickstart-scikitlearn.rst:311 +#, fuzzy msgid "" "Congratulations! You've successfully built and run your first federated " "learning system. The full `source code " "`_ for this example can be found in :code:`examples/sklearn-logreg-" -"mnist`." +"mnist>`_ for this example can be found in ``examples/sklearn-logreg-" +"mnist``." msgstr "" "Félicitations ! Tu as réussi à construire et à faire fonctionner ton " "premier système d'apprentissage fédéré. Le code source complet " @@ -25828,11 +26153,11 @@ msgid "" "with TensorFlow to train a CNN model on CIFAR-10." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:5 +#: ../../source/tutorial-quickstart-tensorflow.rst:4 msgid "Quickstart TensorFlow" msgstr "Démarrage rapide de TensorFlow" -#: ../../source/tutorial-quickstart-tensorflow.rst:7 +#: ../../source/tutorial-quickstart-tensorflow.rst:6 #, fuzzy msgid "" "In this tutorial we will learn how to train a Convolutional Neural " @@ -25844,7 +26169,7 @@ msgstr "" "tout exécuter au sein d'un `virtualenv `_." -#: ../../source/tutorial-quickstart-tensorflow.rst:13 +#: ../../source/tutorial-quickstart-tensorflow.rst:11 msgid "" "Let's use `flwr new` to create a complete Flower+TensorFlow project. It " "will generate all the files needed to run, by default with the Flower " @@ -25856,14 +26181,14 @@ msgid "" "api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner>`_." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:28 +#: ../../source/tutorial-quickstart-tensorflow.rst:26 msgid "" "Then, run the command below. 
You will be prompted to select one of the " "available templates (choose ``TensorFlow``), give a name to your project," " and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:118 +#: ../../source/tutorial-quickstart-tensorflow.rst:114 msgid "" "This tutorial uses `Flower Datasets `_ " "to easily download and partition the `CIFAR-10` dataset. In this example " @@ -25877,14 +26202,14 @@ msgid "" " correspond to their data partition." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:147 +#: ../../source/tutorial-quickstart-tensorflow.rst:141 msgid "" "Next, we need a model. We defined a simple Convolutional Neural Network " "(CNN), but feel free to replace it with a more sophisticated model if " "you'd like:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:178 +#: ../../source/tutorial-quickstart-tensorflow.rst:170 msgid "" "With `TensorFlow`, we can use the built-in ``get_weights()`` and " "``set_weights()`` functions, which simplifies the implementation with " @@ -25895,7 +26220,7 @@ msgid "" "set that the client might have:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:212 +#: ../../source/tutorial-quickstart-tensorflow.rst:203 msgid "" "Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " "defined above by means of a ``client_fn()`` callback. Note that the " @@ -25907,7 +26232,7 @@ msgid "" "``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:247 +#: ../../source/tutorial-quickstart-tensorflow.rst:234 msgid "" "To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" " identical signature to that of ``client_fn()`` but the return type is " @@ -25919,7 +26244,7 @@ msgid "" "the global model to federate." 
msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:284 +#: ../../source/tutorial-quickstart-tensorflow.rst:270 #, fuzzy msgid "" "Check the source code of the extended version of this tutorial in " @@ -25931,7 +26256,7 @@ msgstr "" "mxnet/client.py>`_ de cet exemple se trouve dans :code:`examples" "/quickstart-mxnet`." -#: ../../source/tutorial-quickstart-tensorflow.rst:299 +#: ../../source/tutorial-quickstart-tensorflow.rst:282 msgid "" "The video shown below shows how to setup a TensorFlow + Flower project " "using our previously recommended APIs. A new video tutorial will be " @@ -25944,16 +26269,16 @@ msgid "" "with XGBoost to train classification models on trees." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:5 +#: ../../source/tutorial-quickstart-xgboost.rst:4 msgid "Quickstart XGBoost" msgstr "Démarrage rapide XGBoost" -#: ../../source/tutorial-quickstart-xgboost.rst:14 +#: ../../source/tutorial-quickstart-xgboost.rst:13 #, fuzzy msgid "Federated XGBoost" msgstr "Formation fédérée" -#: ../../source/tutorial-quickstart-xgboost.rst:16 +#: ../../source/tutorial-quickstart-xgboost.rst:15 msgid "" "EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " "implementation of gradient-boosted decision tree (**GBDT**), that " @@ -25963,19 +26288,19 @@ msgid "" "concurrently, unlike the sequential approach taken by GBDT." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:20 +#: ../../source/tutorial-quickstart-xgboost.rst:21 msgid "" "Often, for tabular data on medium-sized datasets with fewer than 10k " "training examples, XGBoost surpasses the results of deep learning " "techniques." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:23 +#: ../../source/tutorial-quickstart-xgboost.rst:25 #, fuzzy msgid "Why federated XGBoost?" msgstr "Qu'est-ce que l'apprentissage fédéré ?" 
-#: ../../source/tutorial-quickstart-xgboost.rst:25 +#: ../../source/tutorial-quickstart-xgboost.rst:27 msgid "" "Indeed, as the demand for data privacy and decentralized learning grows, " "there's an increasing requirement to implement federated XGBoost systems " @@ -25983,7 +26308,7 @@ msgid "" "detection." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:27 +#: ../../source/tutorial-quickstart-xgboost.rst:31 msgid "" "Federated learning ensures that raw data remains on the local device, " "making it an attractive approach for sensitive domains where data " @@ -25992,10 +26317,10 @@ msgid "" "solution for these specific challenges." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:30 +#: ../../source/tutorial-quickstart-xgboost.rst:36 msgid "" "In this tutorial we will learn how to train a federated XGBoost model on " -"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " +"HIGGS dataset using Flower and ``xgboost`` package. We use a simple " "example (`full code xgboost-quickstart " "`_)" " with two *clients* and one *server* to demonstrate how federated XGBoost" @@ -26004,11 +26329,11 @@ msgid "" "comprehensive>`_) to run various experiments." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:37 +#: ../../source/tutorial-quickstart-xgboost.rst:46 msgid "Environment Setup" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:39 +#: ../../source/tutorial-quickstart-xgboost.rst:48 #, fuzzy msgid "" "First of all, it is recommended to create a virtual environment and run " @@ -26019,20 +26344,20 @@ msgstr "" "tout exécuter au sein d'un `virtualenv `_." -#: ../../source/tutorial-quickstart-xgboost.rst:41 +#: ../../source/tutorial-quickstart-xgboost.rst:51 msgid "" "We first need to install Flower and Flower Datasets. 
You can do this by " "running :" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:47 +#: ../../source/tutorial-quickstart-xgboost.rst:57 #, fuzzy msgid "" -"Since we want to use :code:`xgboost` package to build up XGBoost trees, " -"let's go ahead and install :code:`xgboost`:" +"Since we want to use ``xgboost`` package to build up XGBoost trees, let's" +" go ahead and install ``xgboost``:" msgstr "Puisque nous voulons utiliser scikt-learn, allons-y et installons-le :" -#: ../../source/tutorial-quickstart-xgboost.rst:57 +#: ../../source/tutorial-quickstart-xgboost.rst:67 msgid "" "*Clients* are responsible for generating individual weight-updates for " "the model based on their local datasets. Now that we have all our " @@ -26040,131 +26365,129 @@ msgid "" "clients and one server." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:60 +#: ../../source/tutorial-quickstart-xgboost.rst:71 #, fuzzy msgid "" -"In a file called :code:`client.py`, import xgboost, Flower, Flower " -"Datasets and other related functions:" +"In a file called ``client.py``, import xgboost, Flower, Flower Datasets " +"and other related functions:" msgstr "" "Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " "liés à PyTorch :" -#: ../../source/tutorial-quickstart-xgboost.rst:87 +#: ../../source/tutorial-quickstart-xgboost.rst:99 msgid "Dataset partition and hyper-parameter selection" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:89 +#: ../../source/tutorial-quickstart-xgboost.rst:101 msgid "" "Prior to local training, we require loading the HIGGS dataset from Flower" " Datasets and conduct data partitioning for FL:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:102 +#: ../../source/tutorial-quickstart-xgboost.rst:115 msgid "" "In this example, we split the dataset into 30 partitions with uniform " -"distribution (:code:`IidPartitioner(num_partitions=30)`). 
Then, we load " -"the partition for the given client based on :code:`partition_id`:" +"distribution (``IidPartitioner(num_partitions=30)``). Then, we load the " +"partition for the given client based on ``partition_id``:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:121 +#: ../../source/tutorial-quickstart-xgboost.rst:135 msgid "" "After that, we do train/test splitting on the given partition (client's " -"local data), and transform data format for :code:`xgboost` package." +"local data), and transform data format for ``xgboost`` package." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:134 +#: ../../source/tutorial-quickstart-xgboost.rst:149 msgid "" -"The functions of :code:`train_test_split` and " -":code:`transform_dataset_to_dmatrix` are defined as below:" +"The functions of ``train_test_split`` and " +"``transform_dataset_to_dmatrix`` are defined as below:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:158 +#: ../../source/tutorial-quickstart-xgboost.rst:174 msgid "Finally, we define the hyper-parameters used for XGBoost training." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:174 +#: ../../source/tutorial-quickstart-xgboost.rst:190 msgid "" -"The :code:`num_local_round` represents the number of iterations for local" -" tree boost. We use CPU for the training in default. One can shift it to " -"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " -"evaluation metric." +"The ``num_local_round`` represents the number of iterations for local " +"tree boost. We use CPU for the training in default. One can shift it to " +"GPU by setting ``tree_method`` to ``gpu_hist``. We use AUC as evaluation " +"metric." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:181 +#: ../../source/tutorial-quickstart-xgboost.rst:195 msgid "Flower client definition for XGBoost" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:183 +#: ../../source/tutorial-quickstart-xgboost.rst:197 msgid "" "After loading the dataset we define the Flower client. We follow the " -"general rule to define :code:`XgbClient` class inherited from " -":code:`fl.client.Client`." +"general rule to define ``XgbClient`` class inherited from " +"``fl.client.Client``." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:205 +#: ../../source/tutorial-quickstart-xgboost.rst:219 msgid "" -"All required parameters defined above are passed to :code:`XgbClient`'s " +"All required parameters defined above are passed to ``XgbClient``'s " "constructor." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:207 +#: ../../source/tutorial-quickstart-xgboost.rst:221 msgid "" -"Then, we override :code:`get_parameters`, :code:`fit` and " -":code:`evaluate` methods insides :code:`XgbClient` class as follows." +"Then, we override ``get_parameters``, ``fit`` and ``evaluate`` methods " +"insides ``XgbClient`` class as follows." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:221 +#: ../../source/tutorial-quickstart-xgboost.rst:236 msgid "" "Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. In this case, we do not use " -":code:`get_parameters` and :code:`set_parameters` to initialise model " -"parameters for XGBoost. As a result, let's return an empty tensor in " -":code:`get_parameters` when it is called by the server at the first " -"round." +"specified random weights. In this case, we do not use ``get_parameters`` " +"and ``set_parameters`` to initialise model parameters for XGBoost. As a " +"result, let's return an empty tensor in ``get_parameters`` when it is " +"called by the server at the first round." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:262 +#: ../../source/tutorial-quickstart-xgboost.rst:278 msgid "" -"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " -"up the first set of trees. From the second round, we load the global " -"model sent from server to new build Booster object, and then update model" -" weights on local training data with function :code:`local_boost` as " -"follows:" +"In ``fit``, at the first round, we call ``xgb.train()`` to build up the " +"first set of trees. From the second round, we load the global model sent " +"from server to new build Booster object, and then update model weights on" +" local training data with function ``local_boost`` as follows:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:281 +#: ../../source/tutorial-quickstart-xgboost.rst:298 msgid "" -"Given :code:`num_local_round`, we update trees by calling " -":code:`bst_input.update` method. After training, the last " -":code:`N=num_local_round` trees will be extracted to send to the server." +"Given ``num_local_round``, we update trees by calling " +"``bst_input.update`` method. After training, the last " +"``N=num_local_round`` trees will be extracted to send to the server." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:313 +#: ../../source/tutorial-quickstart-xgboost.rst:330 msgid "" -"In :code:`evaluate`, after loading the global model, we call " -":code:`bst.eval_set` function to conduct evaluation on valid set. The AUC" -" value will be returned." +"In ``evaluate``, after loading the global model, we call ``bst.eval_set``" +" function to conduct evaluation on valid set. The AUC value will be " +"returned." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:316 +#: ../../source/tutorial-quickstart-xgboost.rst:333 #, fuzzy msgid "" -"Now, we can create an instance of our class :code:`XgbClient` and add one" -" line to actually run this client:" +"Now, we can create an instance of our class ``XgbClient`` and add one " +"line to actually run this client:" msgstr "" "Nous pouvons maintenant créer une instance de notre classe " ":code:`MnistClient` et ajouter une ligne pour exécuter ce client :" -#: ../../source/tutorial-quickstart-xgboost.rst:332 +#: ../../source/tutorial-quickstart-xgboost.rst:350 #, fuzzy msgid "" -"That's it for the client. We only have to implement :code:`Client` and " -"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " -"tells the client which server to connect to. In our case we can run the " -"server and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"That's it for the client. We only have to implement ``Client`` and call " +"``fl.client.start_client()``. The string ``\"[::]:8080\"`` tells the " +"client which server to connect to. In our case we can run the server and " +"the client on the same machine, therefore we use ``\"[::]:8080\"``. If we" +" run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" point the client at." msgstr "" "C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou" " :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. La " @@ -26176,7 +26499,7 @@ msgstr "" "différentes, tout ce qui doit changer est l'adresse " ":code:`server_address` vers laquelle nous dirigeons le client." 
-#: ../../source/tutorial-quickstart-xgboost.rst:343 +#: ../../source/tutorial-quickstart-xgboost.rst:360 #, fuzzy msgid "" "These updates are then sent to the *server* which will aggregate them to " @@ -26190,90 +26513,88 @@ msgstr "" "cette version améliorée du modèle à chaque *client*. Un cycle complet de " "mises à jour de poids s'appelle un *round*." -#: ../../source/tutorial-quickstart-xgboost.rst:346 +#: ../../source/tutorial-quickstart-xgboost.rst:364 #, fuzzy msgid "" -"In a file named :code:`server.py`, import Flower and FedXgbBagging from " -":code:`flwr.server.strategy`." +"In a file named ``server.py``, import Flower and FedXgbBagging from " +"``flwr.server.strategy``." msgstr "" "Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " "liés au MXNet :" -#: ../../source/tutorial-quickstart-xgboost.rst:348 +#: ../../source/tutorial-quickstart-xgboost.rst:367 msgid "We first define a strategy for XGBoost bagging aggregation." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:380 +#: ../../source/tutorial-quickstart-xgboost.rst:401 msgid "" -"We use two clients for this example. An " -":code:`evaluate_metrics_aggregation` function is defined to collect and " -"wighted average the AUC values from clients. The :code:`config_func` " -"function is to return the current FL round number to client's " -":code:`fit()` and :code:`evaluate()` methods." +"We use two clients for this example. An ``evaluate_metrics_aggregation`` " +"function is defined to collect and wighted average the AUC values from " +"clients. The ``config_func`` function is to return the current FL round " +"number to client's ``fit()`` and ``evaluate()`` methods." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:384 +#: ../../source/tutorial-quickstart-xgboost.rst:406 #, fuzzy msgid "Then, we start the server:" msgstr "Démarrer le serveur" -#: ../../source/tutorial-quickstart-xgboost.rst:396 +#: ../../source/tutorial-quickstart-xgboost.rst:418 msgid "Tree-based bagging aggregation" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:398 +#: ../../source/tutorial-quickstart-xgboost.rst:420 msgid "" "You must be curious about how bagging aggregation works. Let's look into " "the details." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:400 +#: ../../source/tutorial-quickstart-xgboost.rst:422 msgid "" -"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " -":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." -" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " -"and :code:`evaluate` methods as follows:" +"In file ``flwr.server.strategy.fedxgb_bagging.py``, we define " +"``FedXgbBagging`` inherited from ``flwr.server.strategy.FedAvg``. Then, " +"we override the ``aggregate_fit``, ``aggregate_evaluate`` and " +"``evaluate`` methods as follows:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:496 +#: ../../source/tutorial-quickstart-xgboost.rst:519 msgid "" -"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " -"trees by calling :code:`aggregate()` function:" +"In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost " +"trees by calling ``aggregate()`` function:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:555 +#: ../../source/tutorial-quickstart-xgboost.rst:579 msgid "" "In this function, we first fetch the number of trees and the number of " "parallel trees for the current and previous model by calling " -":code:`_get_tree_nums`. Then, the fetched information will be aggregated." -" After that, the trees (containing model weights) are aggregated to " +"``_get_tree_nums``. 
Then, the fetched information will be aggregated. " +"After that, the trees (containing model weights) are aggregated to " "generate a new tree model." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:560 +#: ../../source/tutorial-quickstart-xgboost.rst:584 msgid "" "After traversal of all clients' models, a new global model is generated, " "followed by the serialisation, and sending back to each client." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:565 +#: ../../source/tutorial-quickstart-xgboost.rst:588 msgid "Launch Federated XGBoost!" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:641 +#: ../../source/tutorial-quickstart-xgboost.rst:664 msgid "" "Congratulations! You've successfully built and run your first federated " -"XGBoost system. The AUC values can be checked in " -":code:`metrics_distributed`. One can see that the average AUC increases " -"over FL rounds." +"XGBoost system. The AUC values can be checked in ``metrics_distributed``." +" One can see that the average AUC increases over FL rounds." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:646 +#: ../../source/tutorial-quickstart-xgboost.rst:668 #, fuzzy msgid "" "The full `source code `_ for this example can be found in :code:`examples" -"/xgboost-quickstart`." +"/xgboost-quickstart/>`_ for this example can be found in ``examples" +"/xgboost-quickstart``." msgstr "" "Félicitations ! Tu as réussi à construire et à faire fonctionner ton " "premier système d'apprentissage fédéré. Le code source complet " @@ -26281,11 +26602,11 @@ msgstr "" "mxnet/client.py>`_ de cet exemple se trouve dans :code:`examples" "/quickstart-mxnet`." 
-#: ../../source/tutorial-quickstart-xgboost.rst:650 +#: ../../source/tutorial-quickstart-xgboost.rst:673 msgid "Comprehensive Federated XGBoost" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:652 +#: ../../source/tutorial-quickstart-xgboost.rst:675 msgid "" "Now that you have known how federated XGBoost work with Flower, it's time" " to run some more comprehensive experiments by customising the " @@ -26298,12 +26619,12 @@ msgid "" "client cohorts in a resource-aware manner. Let's take a look!" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:659 +#: ../../source/tutorial-quickstart-xgboost.rst:685 #, fuzzy msgid "Cyclic training" msgstr "Formation centralisée" -#: ../../source/tutorial-quickstart-xgboost.rst:661 +#: ../../source/tutorial-quickstart-xgboost.rst:687 msgid "" "In addition to bagging aggregation, we offer a cyclic training scheme, " "which performs FL in a client-by-client fashion. Instead of aggregating " @@ -26313,183 +26634,181 @@ msgid "" "for next round's boosting." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:665 -msgid "" -"To do this, we first customise a :code:`ClientManager` in " -":code:`server_utils.py`:" +#: ../../source/tutorial-quickstart-xgboost.rst:693 +msgid "To do this, we first customise a ``ClientManager`` in ``server_utils.py``:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:705 +#: ../../source/tutorial-quickstart-xgboost.rst:733 msgid "" -"The customised :code:`ClientManager` samples all available clients in " -"each FL round based on the order of connection to the server. Then, we " -"define a new strategy :code:`FedXgbCyclic` in " -":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " +"The customised ``ClientManager`` samples all available clients in each FL" +" round based on the order of connection to the server. 
Then, we define a " +"new strategy ``FedXgbCyclic`` in " +"``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially " "select only one client in given round and pass the received model to next" " client." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:746 +#: ../../source/tutorial-quickstart-xgboost.rst:775 msgid "" -"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " +"Unlike the original ``FedAvg``, we don't perform aggregation here. " "Instead, we just make a copy of the received client model as global model" -" by overriding :code:`aggregate_fit`." +" by overriding ``aggregate_fit``." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:749 +#: ../../source/tutorial-quickstart-xgboost.rst:778 msgid "" -"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" -" methods ensure the clients to be sequentially selected given FL round:" +"Also, the customised ``configure_fit`` and ``configure_evaluate`` methods" +" ensure the clients to be sequentially selected given FL round:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:813 +#: ../../source/tutorial-quickstart-xgboost.rst:840 msgid "Customised data partitioning" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:815 +#: ../../source/tutorial-quickstart-xgboost.rst:842 msgid "" -"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" -" to instantiate the data partitioner based on the given " -":code:`num_partitions` and :code:`partitioner_type`. Currently, we " -"provide four supported partitioner type to simulate the uniformity/non-" -"uniformity in data quantity (uniform, linear, square, exponential)." +"In ``dataset.py``, we have a function ``instantiate_partitioner`` to " +"instantiate the data partitioner based on the given ``num_partitions`` " +"and ``partitioner_type``. 
Currently, we provide four supported " +"partitioner type to simulate the uniformity/non-uniformity in data " +"quantity (uniform, linear, square, exponential)." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:846 +#: ../../source/tutorial-quickstart-xgboost.rst:873 #, fuzzy msgid "Customised centralised/distributed evaluation" msgstr "Évaluation centralisée" -#: ../../source/tutorial-quickstart-xgboost.rst:848 +#: ../../source/tutorial-quickstart-xgboost.rst:875 msgid "" "To facilitate centralised evaluation, we define a function in " -":code:`server_utils.py`:" +"``server_utils.py``:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:880 +#: ../../source/tutorial-quickstart-xgboost.rst:907 msgid "" "This function returns a evaluation function which instantiates a " -":code:`Booster` object and loads the global model weights to it. The " -"evaluation is conducted by calling :code:`eval_set()` method, and the " -"tested AUC value is reported." +"``Booster`` object and loads the global model weights to it. The " +"evaluation is conducted by calling ``eval_set()`` method, and the tested " +"AUC value is reported." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:883 +#: ../../source/tutorial-quickstart-xgboost.rst:911 msgid "" "As for distributed evaluation on the clients, it's same as the quick-" -"start example by overriding the :code:`evaluate()` method insides the " -":code:`XgbClient` class in :code:`client_utils.py`." +"start example by overriding the ``evaluate()`` method insides the " +"``XgbClient`` class in ``client_utils.py``." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:887 +#: ../../source/tutorial-quickstart-xgboost.rst:916 #, fuzzy msgid "Flower simulation" msgstr "Simulation de moniteur" -#: ../../source/tutorial-quickstart-xgboost.rst:888 +#: ../../source/tutorial-quickstart-xgboost.rst:918 msgid "" -"We also provide an example code (:code:`sim.py`) to use the simulation " +"We also provide an example code (``sim.py``) to use the simulation " "capabilities of Flower to simulate federated XGBoost training on either a" " single machine or a cluster of machines." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:922 +#: ../../source/tutorial-quickstart-xgboost.rst:954 msgid "" -"After importing all required packages, we define a :code:`main()` " -"function to perform the simulation process:" +"After importing all required packages, we define a ``main()`` function to" +" perform the simulation process:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:977 +#: ../../source/tutorial-quickstart-xgboost.rst:1010 msgid "" "We first load the dataset and perform data partitioning, and the pre-" -"processed data is stored in a :code:`list`. After the simulation begins, " -"the clients won't need to pre-process their partitions again." +"processed data is stored in a ``list``. After the simulation begins, the " +"clients won't need to pre-process their partitions again." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:980 +#: ../../source/tutorial-quickstart-xgboost.rst:1014 msgid "Then, we define the strategies and other hyper-parameters:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1031 +#: ../../source/tutorial-quickstart-xgboost.rst:1065 msgid "" "After that, we start the simulation by calling " -":code:`fl.simulation.start_simulation`:" +"``fl.simulation.start_simulation``:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1051 +#: ../../source/tutorial-quickstart-xgboost.rst:1085 msgid "" -"One of key parameters for :code:`start_simulation` is :code:`client_fn` " -"which returns a function to construct a client. We define it as follows:" +"One of key parameters for ``start_simulation`` is ``client_fn`` which " +"returns a function to construct a client. We define it as follows:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1094 +#: ../../source/tutorial-quickstart-xgboost.rst:1126 msgid "Arguments parser" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1096 +#: ../../source/tutorial-quickstart-xgboost.rst:1128 msgid "" -"In :code:`utils.py`, we define the arguments parsers for clients, server " -"and simulation, allowing users to specify different experimental " -"settings. Let's first see the sever side:" +"In ``utils.py``, we define the arguments parsers for clients, server and " +"simulation, allowing users to specify different experimental settings. " +"Let's first see the sever side:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1142 +#: ../../source/tutorial-quickstart-xgboost.rst:1175 msgid "" "This allows user to specify training strategies / the number of total " "clients / FL rounds / participating clients / clients for evaluation, and" -" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" -" will do centralised evaluation and all functionalities for client " +" evaluation fashion. 
Note that with ``--centralised-eval``, the sever " +"will do centralised evaluation and all functionalities for client " "evaluation will be disabled." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1146 +#: ../../source/tutorial-quickstart-xgboost.rst:1180 msgid "Then, the argument parser on client side:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1200 +#: ../../source/tutorial-quickstart-xgboost.rst:1234 msgid "" "This defines various options for client data partitioning. Besides, " "clients also have an option to conduct evaluation on centralised test set" -" by setting :code:`--centralised-eval`, as well as an option to perform " -"scaled learning rate based on the number of clients by setting :code" -":`--scaled-lr`." +" by setting ``--centralised-eval``, as well as an option to perform " +"scaled learning rate based on the number of clients by setting " +"``--scaled-lr``." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1204 +#: ../../source/tutorial-quickstart-xgboost.rst:1239 msgid "We also have an argument parser for simulation:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1282 +#: ../../source/tutorial-quickstart-xgboost.rst:1317 msgid "This integrates all arguments for both client and server sides." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1285 +#: ../../source/tutorial-quickstart-xgboost.rst:1320 #, fuzzy msgid "Example commands" msgstr "Exemples de PyTorch" -#: ../../source/tutorial-quickstart-xgboost.rst:1287 +#: ../../source/tutorial-quickstart-xgboost.rst:1322 msgid "" "To run a centralised evaluated experiment with bagging strategy on 5 " "clients with exponential distribution for 50 rounds, we first start the " "server as below:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1294 +#: ../../source/tutorial-quickstart-xgboost.rst:1329 #, fuzzy msgid "Then, on each client terminal, we start the clients:" msgstr "Ouvre un autre terminal et démarre le deuxième client :" -#: ../../source/tutorial-quickstart-xgboost.rst:1300 +#: ../../source/tutorial-quickstart-xgboost.rst:1335 msgid "To run the same experiment with Flower simulation:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1306 +#: ../../source/tutorial-quickstart-xgboost.rst:1341 #, fuzzy msgid "" "The full `code `_ for this comprehensive example can be found in" -" :code:`examples/xgboost-comprehensive`." +" ``examples/xgboost-comprehensive``." msgstr "" "Félicitations ! Tu as réussi à construire et à faire fonctionner ton " "premier système d'apprentissage fédéré. Le code source complet " @@ -28488,7 +28807,7 @@ msgstr "" "chose d'autre, comme la régression linéaire classique." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" +msgid "|3a7aceef05f0421794726ac54aaf12fd|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 @@ -28507,7 +28826,7 @@ msgstr "" " Go." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|33cacb7d985c4906b348515c1a5cd993|" +msgid "|d741075f8e624331b42c0746f7d258a0|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 @@ -28538,7 +28857,7 @@ msgstr "" "chanson." 
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|cc080a555947492fa66131dc3a967603|" +msgid "|8fc92d668bcb42b8bda55143847f2329|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 @@ -28559,7 +28878,7 @@ msgstr "" " données pour la même tâche." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|085c3e0fb8664c6aa06246636524b20b|" +msgid "|1c705d833a024f22adcaeb8ae3d13b0b|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 @@ -28580,7 +28899,7 @@ msgstr "" "cloud." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|bfe69c74e48c45d49b50251c38c2a019|" +msgid "|77a037b546a84262b608e04bc82a2c96|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 @@ -28601,7 +28920,7 @@ msgstr "" "appuyés." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|ebbecd651f0348d99c6511ea859bf4ca|" +msgid "|f568e24c9fb0435690ac628210a4be96|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 @@ -28626,7 +28945,7 @@ msgstr "" " sur un serveur centralisé." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|163117eb654a4273babba413cf8065f5|" +msgid "|a7bf029981514e2593aa3a2b48c9d76a|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 @@ -28645,7 +28964,7 @@ msgstr "" "suffisantes pour former un bon modèle." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|452ac3ba453b4cd1be27be1ba7560d64|" +msgid "|3f645ad807f84be8b1f8f3267173939c|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 @@ -28867,7 +29186,7 @@ msgstr "" "partir d'un point de contrôle précédemment sauvegardé." 
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|f403fcd69e4e44409627e748b404c086|" +msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 @@ -28902,7 +29221,7 @@ msgstr "" "rendements décroissants." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|4b00fe63870145968f8443619a792a42|" +msgid "|edcf9a04d96e42608fd01a333375febe|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 @@ -28935,7 +29254,7 @@ msgstr "" "données locales, ou même de quelques étapes (mini-batchs)." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|368378731066486fa4397e89bc6b870c|" +msgid "|3dae22fe797043968e2b7aa7073c78bd|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 @@ -28966,7 +29285,7 @@ msgstr "" " l'entraînement local." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|a66aa83d85bf4ffba7ed660b718066da|" +msgid "|ba178f75267d4ad8aa7363f20709195f|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 @@ -29025,7 +29344,7 @@ msgstr "" "times as much as each of the 100 examples." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|82324b9af72a4582a81839d55caab767|" +msgid "|c380c750bfd2444abce039a1c6fa8e60|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 @@ -29168,7 +29487,7 @@ msgstr "" "quel cadre de ML et n'importe quel langage de programmation." 
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" +msgid "|e7cec00a114b48359935c6510595132e|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 @@ -30379,16 +30698,6 @@ msgstr "" #~ "typically display information on your " #~ "terminal as follows:" #~ msgstr "" -#~ "contenant des informations pertinentes, " -#~ "notamment : le niveau du message " -#~ "de journal (par exemple :code:`INFO`, " -#~ ":code:`DEBUG`), un horodatage, la ligne " -#~ "à partir de laquelle l'enregistrement a" -#~ " eu lieu, ainsi que le message " -#~ "de journal lui-même. De cette " -#~ "façon, le logger afficherait typiquement " -#~ "des informations sur ton terminal comme" -#~ " suit :" #~ msgid "Saving log to file" #~ msgstr "Enregistrement du journal dans un fichier" @@ -30409,22 +30718,6 @@ msgstr "" #~ "`_" #~ " function. For example:" #~ msgstr "" -#~ "Par défaut, le journal de Flower " -#~ "est affiché dans le terminal à " -#~ "partir duquel tu as lancé ta " -#~ "charge de travail d'apprentissage fédéré. " -#~ "Cela s'applique à la fois à la " -#~ "fédération basée sur gRPC (c'est-à-dire " -#~ "lorsque tu fais :code:`fl.server.start_server`) " -#~ "et à l'utilisation du " -#~ ":code:`VirtualClientEngine` (c'est-à-dire lorsque tu" -#~ " fais :code:`fl.simulation.start_simulation`). Dans " -#~ "certaines situations, tu peux vouloir " -#~ "sauvegarder ce journal sur le disque." -#~ " Tu peux le faire en appelant " -#~ "la fonction `fl.common.logger.configure() " -#~ "`_." -#~ " Par exemple :" #~ msgid "" #~ "With the above, Flower will record " @@ -30495,24 +30788,6 @@ msgstr "" #~ "should you wish to backup or " #~ "analyze the logs somewhere else." 
#~ msgstr "" -#~ "La fonction :code:`fl.common.logger.configure` " -#~ "permet également de spécifier un hôte" -#~ " vers lequel les journaux peuvent " -#~ "être envoyés (via :code:`POST`) par " -#~ "l'intermédiaire d'un :code:`logging.handler.HTTPHandler`" -#~ " natif de Python. Il s'agit d'une " -#~ "fonction particulièrement utile dans les " -#~ "charges de travail d'apprentissage fédéré " -#~ "basées sur :code:`gRPC` où la collecte" -#~ " des journaux de toutes les entités" -#~ " (c'est-à-dire le serveur et les " -#~ "clients) pourrait s'avérer fastidieuse. Notez" -#~ " que dans la simulation Flower, le" -#~ " serveur affiche automatiquement tous les" -#~ " journaux. Vous pouvez toujours spécifier" -#~ " un :code:`HTTPHandler` si vous souhaitez" -#~ " sauvegarder ou analyser les journaux " -#~ "à un autre endroit." #~ msgid "Enable SSL connections" #~ msgstr "Collecte centralisée des données" @@ -31411,24 +31686,9 @@ msgstr "" #~ "`_ to " #~ "learn more about the app." #~ msgstr "" -#~ "Créons un nouveau projet d'application " -#~ "dans Xcode et ajoutons :code:`flwr` " -#~ "comme dépendance dans ton projet. Pour" -#~ " notre application, nous stockerons la " -#~ "logique de notre application dans " -#~ ":code:`FLiOSModel.swift` et les éléments de" -#~ " l'interface utilisateur dans " -#~ ":code:`ContentView.swift`.Nous nous concentrerons " -#~ "davantage sur :code:`FLiOSModel.swift` dans ce" -#~ " quickstart. N'hésite pas à te " -#~ "référer à l'`exemple de code complet " -#~ "`_ pour" -#~ " en savoir plus sur l'application." #~ msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" #~ msgstr "" -#~ "Importe les paquets liés à Flower " -#~ "et CoreML dans :code:`FLiOSModel.swift` :" #~ msgid "" #~ "Then add the mlmodel to the " @@ -31445,20 +31705,6 @@ msgstr "" #~ " preprocessing is done inside " #~ ":code:`DataLoader.swift`." 
#~ msgstr "" -#~ "Ensuite, ajoute le mlmodel au projet " -#~ "simplement par glisser-déposer, le " -#~ "mlmodel sera regroupé à l'intérieur de" -#~ " l'application lors du déploiement sur " -#~ "ton appareil iOS. Nous devons passer " -#~ "l'url pour accéder au mlmodel et " -#~ "exécuter les processus d'apprentissage " -#~ "automatique CoreML, elle peut être " -#~ "récupérée en appelant la fonction " -#~ ":code:`Bundle.main.url`. Pour l'ensemble de " -#~ "données MNIST, nous devons le prétraiter" -#~ " dans l'objet :code:`MLBatchProvider`. Le " -#~ "prétraitement est effectué à l'intérieur " -#~ "de :code:`DataLoader.swift`." #~ msgid "" #~ "Since CoreML does not allow the " @@ -31486,10 +31732,6 @@ msgstr "" #~ " by passing our Flower client to " #~ "the function :code:`startFlwrGRPC`." #~ msgstr "" -#~ "Lance ensuite le client Flower gRPC " -#~ "et commence à communiquer avec le " -#~ "serveur en passant notre client Flower" -#~ " à la fonction :code:`startFlwrGRPC`." #~ msgid "" #~ "That's it for the client. We only" @@ -31504,17 +31746,6 @@ msgstr "" #~ "button to start the federated learning" #~ " process." #~ msgstr "" -#~ "C'est tout pour le client. Il nous" -#~ " suffit d'implémenter :code:`Client` ou " -#~ "d'appeler le :code:`MLFlwrClient` fourni et" -#~ " d'appeler :code:`startFlwrGRPC()`. L'attribut " -#~ ":code:`hostname` et :code:`port` indique au" -#~ " client à quel serveur se connecter." -#~ " Pour ce faire, il suffit d'entrer" -#~ " le nom d'hôte et le port dans" -#~ " l'application avant de cliquer sur " -#~ "le bouton de démarrage pour lancer " -#~ "le processus d'apprentissage fédéré." 
#~ msgid "" #~ "Once the server is running we can" @@ -37531,3 +37762,670 @@ msgstr "" #~ msgid "|ed9aae51da70428eab7eef32f21e819e|" #~ msgstr "" +#~ msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" +#~ msgstr "" + +#~ msgid "|33cacb7d985c4906b348515c1a5cd993|" +#~ msgstr "" + +#~ msgid "|cc080a555947492fa66131dc3a967603|" +#~ msgstr "" + +#~ msgid "|085c3e0fb8664c6aa06246636524b20b|" +#~ msgstr "" + +#~ msgid "|bfe69c74e48c45d49b50251c38c2a019|" +#~ msgstr "" + +#~ msgid "|ebbecd651f0348d99c6511ea859bf4ca|" +#~ msgstr "" + +#~ msgid "|163117eb654a4273babba413cf8065f5|" +#~ msgstr "" + +#~ msgid "|452ac3ba453b4cd1be27be1ba7560d64|" +#~ msgstr "" + +#~ msgid "|f403fcd69e4e44409627e748b404c086|" +#~ msgstr "" + +#~ msgid "|4b00fe63870145968f8443619a792a42|" +#~ msgstr "" + +#~ msgid "|368378731066486fa4397e89bc6b870c|" +#~ msgstr "" + +#~ msgid "|a66aa83d85bf4ffba7ed660b718066da|" +#~ msgstr "" + +#~ msgid "|82324b9af72a4582a81839d55caab767|" +#~ msgstr "" + +#~ msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" +#~ msgstr "" + +#~ msgid "" +#~ "Install `xz` (to install different " +#~ "Python versions) and `pandoc` to build" +#~ " the docs::" +#~ msgstr "" + +#~ msgid "" +#~ "Ensure you system (Ubuntu 22.04+) is " +#~ "up-to-date, and you have all " +#~ "necessary packages::" +#~ msgstr "" + +#~ msgid "" +#~ "Let's create the Python environment for" +#~ " all-things Flower. If you wish " +#~ "to use :code:`pyenv`, we provide two " +#~ "convenience scripts that you can use." +#~ " If you prefer using something else" +#~ " than :code:`pyenv`, create a new " +#~ "environment, activate and skip to the" +#~ " last point where all packages are" +#~ " installed." +#~ msgstr "" + +#~ msgid "" +#~ "If in a hurry, bypass the hook " +#~ "using ``--no-verify`` with the ``git " +#~ "commit`` command. ::" +#~ msgstr "" + +#~ msgid "" +#~ "Flower's documentation uses `Sphinx " +#~ "`_. 
There's no " +#~ "convenience script to re-build the " +#~ "documentation yet, but it's pretty " +#~ "easy::" +#~ msgstr "" + +#~ msgid "" +#~ "Some quickstart examples may have " +#~ "limitations or requirements that prevent " +#~ "them from running on every environment." +#~ " For more information, please see " +#~ "`Limitations`_." +#~ msgstr "" + +#~ msgid "" +#~ "Change the application code. For " +#~ "example, change the ``seed`` in " +#~ "``quickstart_docker/task.py`` to ``43`` and " +#~ "save it:" +#~ msgstr "" + +#~ msgid ":code:`fit`" +#~ msgstr ":code:`fit`" + +#~ msgid "" +#~ "\\small\n" +#~ "\\frac{∆ \\times \\sqrt{2 \\times " +#~ "\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +#~ "\n" +#~ msgstr "" + +#~ msgid "Enable node authentication in :code:`SuperLink`" +#~ msgstr "" + +#~ msgid "" +#~ "To enable node authentication, first you" +#~ " need to configure SSL/TLS connections " +#~ "to secure the SuperLink<>SuperNode " +#~ "communication. You can find the complete" +#~ " guide `here `_. After " +#~ "configuring secure connections, you can " +#~ "enable client authentication in a " +#~ "long-running Flower :code:`SuperLink`. Use " +#~ "the following terminal command to start" +#~ " a Flower :code:`SuperNode` that has " +#~ "both secure connections and node " +#~ "authentication enabled:" +#~ msgstr "" + +#~ msgid "" +#~ "The first flag :code:`--auth-list-" +#~ "public-keys` expects a path to a " +#~ "CSV file storing all known node " +#~ "public keys. You need to store all" +#~ " known node public keys that are " +#~ "allowed to participate in a federation" +#~ " in one CSV file (:code:`.csv`)." +#~ msgstr "" + +#~ msgid "" +#~ "The second and third flags :code" +#~ ":`--auth-superlink-private-key` and :code" +#~ ":`--auth-superlink-public-key` expect paths" +#~ " to the server's private and public" +#~ " keys. 
For development purposes, you " +#~ "can generate a private and public " +#~ "key pair using :code:`ssh-keygen -t " +#~ "ecdsa -b 384`." +#~ msgstr "" + +#~ msgid "Enable node authentication in :code:`SuperNode`" +#~ msgstr "" + +#~ msgid "" +#~ "Similar to the long-running Flower " +#~ "server (:code:`SuperLink`), you can easily " +#~ "enable node authentication in the " +#~ "long-running Flower client (:code:`SuperNode`)." +#~ " Use the following terminal command " +#~ "to start an authenticated :code:`SuperNode`:" +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`--auth-supernode-private-key` " +#~ "flag expects a path to the node's" +#~ " private key file and the :code" +#~ ":`--auth-supernode-public-key` flag expects" +#~ " a path to the node's public " +#~ "key file. For development purposes, you" +#~ " can generate a private and public" +#~ " key pair using :code:`ssh-keygen -t" +#~ " ecdsa -b 384`." +#~ msgstr "" + +#~ msgid "" +#~ "You should now have learned how to" +#~ " start a long-running Flower server" +#~ " (:code:`SuperLink`) and client " +#~ "(:code:`SuperNode`) with node authentication " +#~ "enabled. You should also know the " +#~ "significance of the private key and " +#~ "store it safely to minimize security " +#~ "risks." +#~ msgstr "" + +#~ msgid "" +#~ "If you have not added ``conda-" +#~ "forge`` to your channels, you will " +#~ "first need to run the following::" +#~ msgstr "" + +#~ msgid "" +#~ "Once the ``conda-forge`` channel has " +#~ "been enabled, ``flwr`` can be installed" +#~ " with ``conda``::" +#~ msgstr "" + +#~ msgid "or with ``mamba``::" +#~ msgstr "" + +#~ msgid "" +#~ "For central DP with server-side " +#~ "clipping, there are two :code:`Strategy` " +#~ "classes that act as wrappers around " +#~ "the actual :code:`Strategy` instance (for " +#~ "example, :code:`FedAvg`). 
The two wrapper " +#~ "classes are " +#~ ":code:`DifferentialPrivacyServerSideFixedClipping` and " +#~ ":code:`DifferentialPrivacyServerSideAdaptiveClipping` for " +#~ "fixed and adaptive clipping." +#~ msgstr "" + +#~ msgid "" +#~ "The code sample below enables the " +#~ ":code:`FedAvg` strategy to use server-" +#~ "side fixed clipping using the " +#~ ":code:`DifferentialPrivacyServerSideFixedClipping` wrapper " +#~ "class. The same approach can be " +#~ "used with " +#~ ":code:`DifferentialPrivacyServerSideAdaptiveClipping` by " +#~ "adjusting the corresponding input parameters." +#~ msgstr "" + +#~ msgid "" +#~ "For central DP with client-side " +#~ "clipping, the server sends the clipping" +#~ " value to selected clients on each" +#~ " round. Clients can use existing " +#~ "Flower :code:`Mods` to perform the " +#~ "clipping. Two mods are available for " +#~ "fixed and adaptive client-side clipping:" +#~ " :code:`fixedclipping_mod` and " +#~ ":code:`adaptiveclipping_mod` with corresponding " +#~ "server-side wrappers " +#~ ":code:`DifferentialPrivacyClientSideFixedClipping` and " +#~ ":code:`DifferentialPrivacyClientSideAdaptiveClipping`." +#~ msgstr "" + +#~ msgid "" +#~ "The code sample below enables the " +#~ ":code:`FedAvg` strategy to use differential" +#~ " privacy with client-side fixed " +#~ "clipping using both the " +#~ ":code:`DifferentialPrivacyClientSideFixedClipping` wrapper " +#~ "class and, on the client, " +#~ ":code:`fixedclipping_mod`:" +#~ msgstr "" + +#~ msgid "" +#~ "In addition to the server-side " +#~ "strategy wrapper, the :code:`ClientApp` needs" +#~ " to configure the matching " +#~ ":code:`fixedclipping_mod` to perform the " +#~ "client-side clipping:" +#~ msgstr "" + +#~ msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +#~ msgstr "" + +#~ msgid "" +#~ "Note that since version :code:`1.11.0`, " +#~ ":code:`flower-server-app` no longer " +#~ "supports passing a reference to a " +#~ "`ServerApp` attribute. 
Instead, you need " +#~ "to pass the path to Flower app " +#~ "via the argument :code:`--app`. This is" +#~ " the path to a directory containing" +#~ " a `pyproject.toml`. You can create a" +#~ " valid Flower app by executing " +#~ ":code:`flwr new` and following the " +#~ "prompt." +#~ msgstr "" + +#~ msgid "" +#~ "Since CoreML does not allow the " +#~ "model parameters to be seen before " +#~ "training, and accessing the model " +#~ "parameters during or after the training" +#~ " can only be done by specifying " +#~ "the layer name, we need to know" +#~ " this information beforehand, through " +#~ "looking at the model specification, " +#~ "which are written as proto files. " +#~ "The implementation can be seen in " +#~ ":code:`MLModelInspect`." +#~ msgstr "" + +#~ msgid "" +#~ "Prior to local training, we need " +#~ "to load the MNIST dataset, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning, " +#~ "and partition the dataset for FL. " +#~ "This can be conveniently achieved using" +#~ " `Flower Datasets `_." +#~ " The :code:`FederatedDataset.load_partition()` method" +#~ " loads the partitioned training set " +#~ "for each partition ID defined in " +#~ "the :code:`--partition-id` argument." +#~ msgstr "" + +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a federated XGBoost model" +#~ " on HIGGS dataset using Flower and" +#~ " :code:`xgboost` package. We use a " +#~ "simple example (`full code xgboost-" +#~ "quickstart `_) with two *clients* " +#~ "and one *server* to demonstrate how " +#~ "federated XGBoost works, and then we " +#~ "dive into a more complex example " +#~ "(`full code xgboost-comprehensive " +#~ "`_) to run various experiments." +#~ msgstr "" + +#~ msgid "" +#~ "In this example, we split the " +#~ "dataset into 30 partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=30)`)." 
+#~ " Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`partition_id`:" +#~ msgstr "" + +#~ msgid "" +#~ "After that, we do train/test splitting" +#~ " on the given partition (client's " +#~ "local data), and transform data format" +#~ " for :code:`xgboost` package." +#~ msgstr "" + +#~ msgid "" +#~ "The functions of :code:`train_test_split` and" +#~ " :code:`transform_dataset_to_dmatrix` are defined " +#~ "as below:" +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`num_local_round` represents the " +#~ "number of iterations for local tree " +#~ "boost. We use CPU for the training" +#~ " in default. One can shift it " +#~ "to GPU by setting :code:`tree_method` to" +#~ " :code:`gpu_hist`. We use AUC as " +#~ "evaluation metric." +#~ msgstr "" + +#~ msgid "" +#~ "After loading the dataset we define " +#~ "the Flower client. We follow the " +#~ "general rule to define :code:`XgbClient` " +#~ "class inherited from :code:`fl.client.Client`." +#~ msgstr "" + +#~ msgid "" +#~ "All required parameters defined above " +#~ "are passed to :code:`XgbClient`'s constructor." +#~ msgstr "" + +#~ msgid "" +#~ "Then, we override :code:`get_parameters`, " +#~ ":code:`fit` and :code:`evaluate` methods " +#~ "insides :code:`XgbClient` class as follows." +#~ msgstr "" + +#~ msgid "" +#~ "Unlike neural network training, XGBoost " +#~ "trees are not started from a " +#~ "specified random weights. In this case," +#~ " we do not use :code:`get_parameters` " +#~ "and :code:`set_parameters` to initialise model" +#~ " parameters for XGBoost. As a result," +#~ " let's return an empty tensor in " +#~ ":code:`get_parameters` when it is called " +#~ "by the server at the first round." +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. 
From " +#~ "the second round, we load the " +#~ "global model sent from server to " +#~ "new build Booster object, and then " +#~ "update model weights on local training" +#~ " data with function :code:`local_boost` as" +#~ " follows:" +#~ msgstr "" + +#~ msgid "" +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`bst_input.update` method. " +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`evaluate`, after loading the " +#~ "global model, we call :code:`bst.eval_set` " +#~ "function to conduct evaluation on valid" +#~ " set. The AUC value will be " +#~ "returned." +#~ msgstr "" + +#~ msgid "" +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients. The :code:`config_func` function is" +#~ " to return the current FL round " +#~ "number to client's :code:`fit()` and " +#~ ":code:`evaluate()` methods." +#~ msgstr "" + +#~ msgid "" +#~ "In file :code:`flwr.server.strategy.fedxgb_bagging.py`," +#~ " we define :code:`FedXgbBagging` inherited " +#~ "from :code:`flwr.server.strategy.FedAvg`. Then, we" +#~ " override the :code:`aggregate_fit`, " +#~ ":code:`aggregate_evaluate` and :code:`evaluate` " +#~ "methods as follows:" +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`aggregate_fit`, we sequentially " +#~ "aggregate the clients' XGBoost trees by" +#~ " calling :code:`aggregate()` function:" +#~ msgstr "" + +#~ msgid "" +#~ "In this function, we first fetch " +#~ "the number of trees and the number" +#~ " of parallel trees for the current" +#~ " and previous model by calling " +#~ ":code:`_get_tree_nums`. Then, the fetched " +#~ "information will be aggregated. After " +#~ "that, the trees (containing model " +#~ "weights) are aggregated to generate a" +#~ " new tree model." 
+#~ msgstr "" + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated XGBoost " +#~ "system. The AUC values can be " +#~ "checked in :code:`metrics_distributed`. One " +#~ "can see that the average AUC " +#~ "increases over FL rounds." +#~ msgstr "" + +#~ msgid "" +#~ "To do this, we first customise a" +#~ " :code:`ClientManager` in :code:`server_utils.py`:" +#~ msgstr "" + +#~ msgid "" +#~ "The customised :code:`ClientManager` samples " +#~ "all available clients in each FL " +#~ "round based on the order of " +#~ "connection to the server. Then, we " +#~ "define a new strategy :code:`FedXgbCyclic` " +#~ "in :code:`flwr.server.strategy.fedxgb_cyclic.py`, in " +#~ "order to sequentially select only one" +#~ " client in given round and pass " +#~ "the received model to next client." +#~ msgstr "" + +#~ msgid "" +#~ "Unlike the original :code:`FedAvg`, we " +#~ "don't perform aggregation here. Instead, " +#~ "we just make a copy of the " +#~ "received client model as global model" +#~ " by overriding :code:`aggregate_fit`." +#~ msgstr "" + +#~ msgid "" +#~ "Also, the customised :code:`configure_fit` and" +#~ " :code:`configure_evaluate` methods ensure the" +#~ " clients to be sequentially selected " +#~ "given FL round:" +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`dataset.py`, we have a " +#~ "function :code:`instantiate_partitioner` to " +#~ "instantiate the data partitioner based " +#~ "on the given :code:`num_partitions` and " +#~ ":code:`partitioner_type`. Currently, we provide " +#~ "four supported partitioner type to " +#~ "simulate the uniformity/non-uniformity in " +#~ "data quantity (uniform, linear, square, " +#~ "exponential)." 
+#~ msgstr "" + +#~ msgid "" +#~ "To facilitate centralised evaluation, we " +#~ "define a function in :code:`server_utils.py`:" +#~ msgstr "" + +#~ msgid "" +#~ "This function returns a evaluation " +#~ "function which instantiates a :code:`Booster`" +#~ " object and loads the global model" +#~ " weights to it. The evaluation is " +#~ "conducted by calling :code:`eval_set()` " +#~ "method, and the tested AUC value " +#~ "is reported." +#~ msgstr "" + +#~ msgid "" +#~ "As for distributed evaluation on the " +#~ "clients, it's same as the quick-" +#~ "start example by overriding the " +#~ ":code:`evaluate()` method insides the " +#~ ":code:`XgbClient` class in :code:`client_utils.py`." +#~ msgstr "" + +#~ msgid "" +#~ "We also provide an example code " +#~ "(:code:`sim.py`) to use the simulation " +#~ "capabilities of Flower to simulate " +#~ "federated XGBoost training on either a" +#~ " single machine or a cluster of " +#~ "machines." +#~ msgstr "" + +#~ msgid "" +#~ "After importing all required packages, " +#~ "we define a :code:`main()` function to" +#~ " perform the simulation process:" +#~ msgstr "" + +#~ msgid "" +#~ "We first load the dataset and " +#~ "perform data partitioning, and the " +#~ "pre-processed data is stored in a " +#~ ":code:`list`. After the simulation begins, " +#~ "the clients won't need to pre-" +#~ "process their partitions again." +#~ msgstr "" + +#~ msgid "" +#~ "After that, we start the simulation " +#~ "by calling :code:`fl.simulation.start_simulation`:" +#~ msgstr "" + +#~ msgid "" +#~ "One of key parameters for " +#~ ":code:`start_simulation` is :code:`client_fn` which" +#~ " returns a function to construct a" +#~ " client. We define it as follows:" +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`utils.py`, we define the " +#~ "arguments parsers for clients, server " +#~ "and simulation, allowing users to " +#~ "specify different experimental settings. 
Let's" +#~ " first see the sever side:" +#~ msgstr "" + +#~ msgid "" +#~ "This allows user to specify training " +#~ "strategies / the number of total " +#~ "clients / FL rounds / participating " +#~ "clients / clients for evaluation, and" +#~ " evaluation fashion. Note that with " +#~ ":code:`--centralised-eval`, the sever will " +#~ "do centralised evaluation and all " +#~ "functionalities for client evaluation will " +#~ "be disabled." +#~ msgstr "" + +#~ msgid "" +#~ "This defines various options for client" +#~ " data partitioning. Besides, clients also" +#~ " have an option to conduct evaluation" +#~ " on centralised test set by setting" +#~ " :code:`--centralised-eval`, as well as " +#~ "an option to perform scaled learning " +#~ "rate based on the number of " +#~ "clients by setting :code:`--scaled-lr`." +#~ msgstr "" + +#~ msgid "|b8714c45b74b4d8fb008e2ebb3bc1d44|" +#~ msgstr "" + +#~ msgid "|75f1561efcfd422ea67d28d1513120dc|" +#~ msgstr "" + +#~ msgid "|6a1f51b235304558a9bdaaabfc93b8d2|" +#~ msgstr "" + +#~ msgid "|35e70dab1fb544af9aa3a9c09c4f9797|" +#~ msgstr "" + +#~ msgid "|d7efb5705dd3467f991ed23746824a07|" +#~ msgstr "" + +#~ msgid "|94e7b021c7b540bfbedf7f082a41ff87|" +#~ msgstr "" + +#~ msgid "|a80714782dde439ab73936518f91fc3c|" +#~ msgstr "" + +#~ msgid "|c62080ca6197473da57d191c8225a9d9|" +#~ msgstr "" + +#~ msgid "|21a8f1e6a5b14a7bbb8559979d0e8a2b|" +#~ msgstr "" + +#~ msgid "|c310f2a22f7b4917bf42775aae7a1c09|" +#~ msgstr "" + +#~ msgid "|a0c5b43401194535a8460bcf02e65f9a|" +#~ msgstr "" + +#~ msgid "|aabfdbd5564e41a790f8ea93cc21a444|" +#~ msgstr "" + +#~ msgid "|c9cc8f160fa647b09e742fe4dc8edb54|" +#~ msgstr "" + +#~ msgid "|7e83aad011cd4907b2f02f907c6922e9|" +#~ msgstr "" + +#~ msgid "|4627c2bb6cc443ae9e079f81f33c9dd9|" +#~ msgstr "" + +#~ msgid "|131af8322dc5466b827afd24be98f8c0|" +#~ msgstr "" + +#~ msgid "|f92920b87f3a40179bf7ddd0b6144c53|" +#~ msgstr "" + +#~ msgid "|d62da263071d45a496f543e41fce3a19|" +#~ msgstr "" + +#~ msgid 
"|ad851971645b4e1fbf8d15bcc0b2ee11|" +#~ msgstr "" + +#~ msgid "|929e9a6de6b34edb8488e644e2bb5221|" +#~ msgstr "" + +#~ msgid "|404cf9c9e8d64784a55646c0f9479cbc|" +#~ msgstr "" + +#~ msgid "|b021ff9d25814458b1e631f8985a648b|" +#~ msgstr "" + +#~ msgid "|e6ca84e1df244f238288a768352678e5|" +#~ msgstr "" + +#~ msgid "|39c2422082554a21963baffb33a0d057|" +#~ msgstr "" + +#~ msgid "|07ecf5fcd6814e88906accec6fa0fbfb|" +#~ msgstr "" + +#~ msgid "|57e78c0ca8a94ba5a64a04b1f2280e55|" +#~ msgstr "" + +#~ msgid "|9819b40e59ee40a4921e1244e8c99bac|" +#~ msgstr "" + +#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|" +#~ msgstr "" + diff --git a/doc/locales/ko/LC_MESSAGES/framework-docs.po b/doc/locales/ko/LC_MESSAGES/framework-docs.po index 4c738e16b434..4cdf1c565be6 100644 --- a/doc/locales/ko/LC_MESSAGES/framework-docs.po +++ b/doc/locales/ko/LC_MESSAGES/framework-docs.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-09-24 00:29+0000\n" +"POT-Creation-Date: 2024-09-27 00:30+0000\n" "PO-Revision-Date: 2024-08-23 13:09+0000\n" "Last-Translator: Seulki Yun \n" "Language: ko\n" @@ -62,23 +62,23 @@ msgid "" "or not by reading the Flower source code." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:22 +#: ../../source/contributor-explanation-public-and-private-apis.rst:23 #, fuzzy msgid "Flower public API" msgstr "Flower ClientApp." -#: ../../source/contributor-explanation-public-and-private-apis.rst:24 +#: ../../source/contributor-explanation-public-and-private-apis.rst:25 msgid "Flower has a well-defined public API. Let's look at this in more detail." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:28 +#: ../../source/contributor-explanation-public-and-private-apis.rst:29 msgid "" "Every component that is reachable by recursively following " "``__init__.__all__`` starting from the root package (``flwr``) is part of" " the public API." 
msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:30 +#: ../../source/contributor-explanation-public-and-private-apis.rst:32 msgid "" "If you want to determine whether a component " "(class/function/generator/...) is part of the public API or not, you need" @@ -86,13 +86,13 @@ msgid "" "src/py/flwr`` to look at the Python sub-packages contained ``flwr``:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:43 +#: ../../source/contributor-explanation-public-and-private-apis.rst:46 msgid "" "Contrast this with the definition of ``__all__`` in the root " "``src/py/flwr/__init__.py``:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:55 +#: ../../source/contributor-explanation-public-and-private-apis.rst:59 msgid "" "You can see that ``flwr`` has six subpackages (``cli``, ``client``, " "``common``, ``proto``, ``server``, ``simulation``), but only four of them" @@ -100,7 +100,7 @@ msgid "" "``simulation``)." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:57 +#: ../../source/contributor-explanation-public-and-private-apis.rst:63 msgid "" "What does this mean? It means that ``client``, ``common``, ``server`` and" " ``simulation`` are part of the public API, but ``cli`` and ``proto`` are" @@ -111,21 +111,21 @@ msgid "" "even be removed completely." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:62 +#: ../../source/contributor-explanation-public-and-private-apis.rst:70 msgid "Therefore, as a Flower user:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:64 +#: ../../source/contributor-explanation-public-and-private-apis.rst:72 msgid "``from flwr import client`` ✅ Ok, you're importing a public API." 
msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:65 +#: ../../source/contributor-explanation-public-and-private-apis.rst:73 msgid "" "``from flwr import proto`` ❌ Not recommended, you're importing a private " "API." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:67 +#: ../../source/contributor-explanation-public-and-private-apis.rst:75 msgid "" "What about components that are nested deeper in the hierarchy? Let's look" " at Flower strategies to see another typical pattern. Flower strategies " @@ -134,7 +134,7 @@ msgid "" "``src/py/flwr/server/strategy/__init__.py``:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:81 +#: ../../source/contributor-explanation-public-and-private-apis.rst:91 msgid "" "What's notable here is that all strategies are implemented in dedicated " "modules (e.g., ``fedavg.py``). In ``__init__.py``, we *import* the " @@ -146,33 +146,33 @@ msgid "" "the public API (as long as we update the import path in ``__init__.py``)." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:86 +#: ../../source/contributor-explanation-public-and-private-apis.rst:99 msgid "Therefore:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:88 +#: ../../source/contributor-explanation-public-and-private-apis.rst:101 msgid "" "``from flwr.server.strategy import FedAvg`` ✅ Ok, you're importing a " "class that is part of the public API." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:89 +#: ../../source/contributor-explanation-public-and-private-apis.rst:103 msgid "" "``from flwr.server.strategy import fedavg`` ❌ Not recommended, you're " "importing a private module." 
msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:91 +#: ../../source/contributor-explanation-public-and-private-apis.rst:106 msgid "" "This approach is also implemented in the tooling that automatically " "builds API reference docs." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:94 +#: ../../source/contributor-explanation-public-and-private-apis.rst:110 msgid "Flower public API of private packages" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:96 +#: ../../source/contributor-explanation-public-and-private-apis.rst:112 msgid "" "We also use this to define the public API of private subpackages. Public," " in this context, means the API that other ``flwr`` subpackages should " @@ -180,14 +180,14 @@ msgid "" "not exported via ``src/py/flwr/server/__init__.py``'s ``__all__``)." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:100 +#: ../../source/contributor-explanation-public-and-private-apis.rst:117 msgid "" "Still, the private sub-package ``flwr.server.driver`` defines a " "\"public\" API using ``__all__`` in " "``src/py/flwr/server/driver/__init__.py``:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:114 +#: ../../source/contributor-explanation-public-and-private-apis.rst:132 msgid "" "The interesting part is that both ``GrpcDriver`` and ``InMemoryDriver`` " "are never used by Flower framework users, only by other parts of the " @@ -199,7 +199,7 @@ msgid "" "``InMemoryDriver`` class definition)." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:117 +#: ../../source/contributor-explanation-public-and-private-apis.rst:140 msgid "" "This is because ``flwr.server.driver`` defines a public interface for " "other ``flwr`` subpackages. This allows codeowners of " @@ -234,16 +234,16 @@ msgid "" "development environment." msgstr "시작하기 전에, 로컬 개발 환경에서 몇 가지 전제 조건을 충족해야 합니다." 
-#: ../../source/contributor-how-to-build-docker-images.rst:12 +#: ../../source/contributor-how-to-build-docker-images.rst:13 #, fuzzy msgid "Clone the ``flower`` repository." msgstr "플라워 레포지토리를 클론합니다." -#: ../../source/contributor-how-to-build-docker-images.rst:18 +#: ../../source/contributor-how-to-build-docker-images.rst:19 msgid "Verify the Docker daemon is running." msgstr "Docker 데몬이 실행 중인지 확인하십시오." -#: ../../source/contributor-how-to-build-docker-images.rst:20 +#: ../../source/contributor-how-to-build-docker-images.rst:21 msgid "" "The build instructions that assemble the images are located in the " "respective Dockerfiles. You can find them in the subdirectories of " @@ -252,7 +252,7 @@ msgstr "" "이미지들을 조합하는 빌드 명령어들은 해당 Dockerfile에 있습니다. \"src/docker\" 의 하위 디렉토리에서 찾을 수 " "있습니다." -#: ../../source/contributor-how-to-build-docker-images.rst:23 +#: ../../source/contributor-how-to-build-docker-images.rst:24 msgid "" "Flower Docker images are configured via build arguments. Through build " "arguments, we can make the creation of images more flexible. For example," @@ -267,141 +267,141 @@ msgstr "" " 있습니다. 일부 빌드 전달인자들은 기본값이며, 이미지를 빌드할 때 지정해야 합니다. 각 이미지에 사용할 수 있는 모든 빌드 " "전달인자는 아래 표 중에 있습니다." 
-#: ../../source/contributor-how-to-build-docker-images.rst:30 +#: ../../source/contributor-how-to-build-docker-images.rst:32 #, fuzzy msgid "Building the Base Image" msgstr "기본 이미지 빌드" -#: ../../source/contributor-how-to-build-docker-images.rst:36 -#: ../../source/contributor-how-to-build-docker-images.rst:98 +#: ../../source/contributor-how-to-build-docker-images.rst:38 +#: ../../source/contributor-how-to-build-docker-images.rst:104 msgid "Build argument" msgstr "빌드 전달인자" -#: ../../source/contributor-how-to-build-docker-images.rst:37 -#: ../../source/contributor-how-to-build-docker-images.rst:99 +#: ../../source/contributor-how-to-build-docker-images.rst:39 +#: ../../source/contributor-how-to-build-docker-images.rst:105 msgid "Description" msgstr "설명" -#: ../../source/contributor-how-to-build-docker-images.rst:38 -#: ../../source/contributor-how-to-build-docker-images.rst:100 +#: ../../source/contributor-how-to-build-docker-images.rst:40 +#: ../../source/contributor-how-to-build-docker-images.rst:106 msgid "Required" msgstr "필수" -#: ../../source/contributor-how-to-build-docker-images.rst:39 -#: ../../source/contributor-how-to-build-docker-images.rst:101 -#: ../../source/docker/persist-superlink-state.rst:18 -#: ../../source/docker/pin-version.rst:11 +#: ../../source/contributor-how-to-build-docker-images.rst:41 +#: ../../source/contributor-how-to-build-docker-images.rst:107 +#: ../../source/docker/persist-superlink-state.rst:19 +#: ../../source/docker/pin-version.rst:12 #: ../../source/docker/set-environment-variables.rst:8 msgid "Example" msgstr "예시" -#: ../../source/contributor-how-to-build-docker-images.rst:40 +#: ../../source/contributor-how-to-build-docker-images.rst:42 msgid "``DISTRO``" msgstr "``DISTRO``" -#: ../../source/contributor-how-to-build-docker-images.rst:41 +#: ../../source/contributor-how-to-build-docker-images.rst:43 msgid "The Linux distribution to use as the base image." msgstr "기본 이미지 사용을 위한 Linux 배포판." 
-#: ../../source/contributor-how-to-build-docker-images.rst:42 -#: ../../source/contributor-how-to-build-docker-images.rst:46 -#: ../../source/contributor-how-to-build-docker-images.rst:50 -#: ../../source/contributor-how-to-build-docker-images.rst:66 -#: ../../source/contributor-how-to-build-docker-images.rst:70 -#: ../../source/contributor-how-to-build-docker-images.rst:104 +#: ../../source/contributor-how-to-build-docker-images.rst:44 +#: ../../source/contributor-how-to-build-docker-images.rst:48 +#: ../../source/contributor-how-to-build-docker-images.rst:52 +#: ../../source/contributor-how-to-build-docker-images.rst:68 +#: ../../source/contributor-how-to-build-docker-images.rst:75 +#: ../../source/contributor-how-to-build-docker-images.rst:110 msgid "No" msgstr "아니오" -#: ../../source/contributor-how-to-build-docker-images.rst:43 +#: ../../source/contributor-how-to-build-docker-images.rst:45 msgid "``ubuntu``" msgstr "``ubuntu``" -#: ../../source/contributor-how-to-build-docker-images.rst:44 +#: ../../source/contributor-how-to-build-docker-images.rst:46 msgid "``DISTRO_VERSION``" msgstr "``DISTRO_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:45 +#: ../../source/contributor-how-to-build-docker-images.rst:47 msgid "Version of the Linux distribution." msgstr "Linux 배포판 버전." -#: ../../source/contributor-how-to-build-docker-images.rst:47 +#: ../../source/contributor-how-to-build-docker-images.rst:49 msgid ":substitution-code:`|ubuntu_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:48 +#: ../../source/contributor-how-to-build-docker-images.rst:50 msgid "``PYTHON_VERSION``" msgstr "``PYTHON_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:49 +#: ../../source/contributor-how-to-build-docker-images.rst:51 msgid "Version of ``python`` to be installed." msgstr "설치 된 ``python`` 버전." 
-#: ../../source/contributor-how-to-build-docker-images.rst:51 +#: ../../source/contributor-how-to-build-docker-images.rst:53 msgid "``3.11`` or ``3.11.1``" msgstr "``3.11`` 또는 ``3.11.1``" -#: ../../source/contributor-how-to-build-docker-images.rst:52 +#: ../../source/contributor-how-to-build-docker-images.rst:54 msgid "``PIP_VERSION``" msgstr "``PIP_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:53 +#: ../../source/contributor-how-to-build-docker-images.rst:55 msgid "Version of ``pip`` to be installed." msgstr "설치 된 ``pip`` 버전." -#: ../../source/contributor-how-to-build-docker-images.rst:54 -#: ../../source/contributor-how-to-build-docker-images.rst:58 -#: ../../source/contributor-how-to-build-docker-images.rst:62 -#: ../../source/contributor-how-to-build-docker-images.rst:108 +#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:60 +#: ../../source/contributor-how-to-build-docker-images.rst:64 +#: ../../source/contributor-how-to-build-docker-images.rst:114 msgid "Yes" msgstr "예" -#: ../../source/contributor-how-to-build-docker-images.rst:55 +#: ../../source/contributor-how-to-build-docker-images.rst:57 msgid ":substitution-code:`|pip_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:58 msgid "``SETUPTOOLS_VERSION``" msgstr "``SETUPTOOLS_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:57 +#: ../../source/contributor-how-to-build-docker-images.rst:59 msgid "Version of ``setuptools`` to be installed." msgstr "설치 된 ``setuptools`` 버전." 
-#: ../../source/contributor-how-to-build-docker-images.rst:59 +#: ../../source/contributor-how-to-build-docker-images.rst:61 #, fuzzy msgid ":substitution-code:`|setuptools_version|`" msgstr "``SETUPTOOLS_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:60 +#: ../../source/contributor-how-to-build-docker-images.rst:62 msgid "``FLWR_VERSION``" msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:61 +#: ../../source/contributor-how-to-build-docker-images.rst:63 msgid "Version of Flower to be installed." msgstr "설치 된 Flower 버전." -#: ../../source/contributor-how-to-build-docker-images.rst:63 +#: ../../source/contributor-how-to-build-docker-images.rst:65 msgid ":substitution-code:`|stable_flwr_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:64 +#: ../../source/contributor-how-to-build-docker-images.rst:66 msgid "``FLWR_PACKAGE``" msgstr "``FLWR_PACKAGE``" -#: ../../source/contributor-how-to-build-docker-images.rst:65 +#: ../../source/contributor-how-to-build-docker-images.rst:67 msgid "The Flower package to be installed." msgstr "설치 할 Flower 패키지." -#: ../../source/contributor-how-to-build-docker-images.rst:67 +#: ../../source/contributor-how-to-build-docker-images.rst:69 msgid "``flwr`` or ``flwr-nightly``" msgstr "``flwr`` 또는 ``flwr-nightly``" -#: ../../source/contributor-how-to-build-docker-images.rst:68 +#: ../../source/contributor-how-to-build-docker-images.rst:70 #, fuzzy msgid "``FLWR_VERSION_REF``" msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:69 +#: ../../source/contributor-how-to-build-docker-images.rst:71 msgid "" "A `direct reference " "`_를 참조하세요." -#: ../../source/contributor-how-to-contribute-translations.rst:29 +#: ../../source/contributor-how-to-contribute-translations.rst:28 msgid "" "Once you are signed in to Weblate, you can navigate to the `Flower " "Framework project `_로 이동할 수 " "있습니다. 여기에서 웹사이트에 있는 다양한 기존 언어들을 확인할 수 있습니다." 
-#: ../../source/contributor-how-to-contribute-translations.rst:34 +#: ../../source/contributor-how-to-contribute-translations.rst:32 msgid "" "Once you have selected the language you want to contribute to, you should" " see a similar interface to this:" msgstr "기여하고자 하는 언어를 선택하면, 다음과 같은 인터페이스가 나타납니다:" -#: ../../source/contributor-how-to-contribute-translations.rst:39 +#: ../../source/contributor-how-to-contribute-translations.rst:37 msgid "" "The most straight forward option here is to click on the ``Translate`` " "button on the top right (in the ``Translation status`` section). This " @@ -564,11 +564,11 @@ msgstr "" "여기서 가장 간단한 옵션은 오른쪽 상단(``Translation status`` 부분)에 있는 ``Translate`` 버튼을 " "클릭하는 것 입니다. 번역되지 않은 문장에 대한 번역 인터페이스로 자동으로 이동합니다." -#: ../../source/contributor-how-to-contribute-translations.rst:43 +#: ../../source/contributor-how-to-contribute-translations.rst:41 msgid "This is what the interface looks like:" msgstr "인터페이스는 다음과 같습니다:" -#: ../../source/contributor-how-to-contribute-translations.rst:47 +#: ../../source/contributor-how-to-contribute-translations.rst:45 msgid "" "You input your translation in the text box at the top and then, once you " "are happy with it, you either press ``Save and continue`` (to save the " @@ -582,7 +582,7 @@ msgstr "" "볼 수 있도록 번역을 제안 항목에 추가), ``Skip``(아무것도 저장하지 않고 다음 미번역 문장으로 이동) 중 하나를 선택하면 " "됩니다." -#: ../../source/contributor-how-to-contribute-translations.rst:54 +#: ../../source/contributor-how-to-contribute-translations.rst:51 msgid "" "In order to help with the translations, you can see on the bottom the " "``Nearby strings``, the ``Comments`` (from other contributors), the " @@ -593,14 +593,14 @@ msgstr "" "번역에 도움을 주기위해 하단에서 `주변 문자열``, ``의견``(다른 기여자의), ``자동 제안``(기계 번역의), ``다른 " "언어``의 번역 및 해당 문장의 번역``히스토리``를 볼 수 있습니다." 
-#: ../../source/contributor-how-to-contribute-translations.rst:59 +#: ../../source/contributor-how-to-contribute-translations.rst:56 msgid "" "On the right, under the ``String information`` section, you can also " "click the link under ``Source string location`` in order to view the " "source of the doc file containing the string." msgstr "오른쪽의 ``문자열 정보``에서 ``원본 문자열 위치``를 클릭하여 해당 문장이 포함된 문서의 파일 소스를 볼 수도 있습니다." -#: ../../source/contributor-how-to-contribute-translations.rst:63 +#: ../../source/contributor-how-to-contribute-translations.rst:60 msgid "" "For more information about translating using Weblate, you can check out " "this `in-depth guide " @@ -609,11 +609,11 @@ msgstr "" "Weblate를 통한 번역에 대한 자세한 정보는 `in-depth guide " "`_를 확인하세요." -#: ../../source/contributor-how-to-contribute-translations.rst:67 +#: ../../source/contributor-how-to-contribute-translations.rst:64 msgid "Add new languages" msgstr "새 언어 추가" -#: ../../source/contributor-how-to-contribute-translations.rst:69 +#: ../../source/contributor-how-to-contribute-translations.rst:66 msgid "" "If you want to add a new language, you will first have to contact us, " "either on `Slack `_, or by opening an issue" @@ -637,17 +637,17 @@ msgstr "" "위해 VSCode Remote Containers 확장을 사용하고 있습니다. 그것이 무엇인지 알아보기 위해 다음 인용문을 " "읽어보세요:" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:7 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:8 +#, fuzzy msgid "" "The Visual Studio Code Remote - Containers extension lets you use a " "Docker container as a fully-featured development environment. It allows " "you to open any folder inside (or mounted into) a container and take " "advantage of Visual Studio Code's full feature set. A " -":code:`devcontainer.json` file in your project tells VS Code how to " -"access (or create) a development container with a well-defined tool and " -"runtime stack. 
This container can be used to run an application or to " -"separate tools, libraries, or runtimes needed for working with a " -"codebase." +"``devcontainer.json`` file in your project tells VS Code how to access " +"(or create) a development container with a well-defined tool and runtime " +"stack. This container can be used to run an application or to separate " +"tools, libraries, or runtimes needed for working with a codebase." msgstr "" "Visual Studio Code Remote - 컨테이너 확장을 사용하면 Docker 컨테이너를 모든 기능을 갖춘 개발 환경으로 " "사용할 수 있습니다. 이 확장 기능을 사용하면 컨테이너 내부(또는 컨테이너에 마운트된)의 모든 폴더를 열고 Visual Studio" @@ -655,7 +655,7 @@ msgstr "" "도구와 런타임 스택을 사용하여 개발 컨테이너에 액세스(또는 생성)하는 방법을 VS Code에 알려줍니다. 이 컨테이너는 " "애플리케이션을 실행하거나 코드베이스 작업에 필요한 도구, 라이브러리 또는 런타임을 분리하는 데 사용할 수 있습니다." -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:9 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:16 msgid "" "Workspace files are mounted from the local file system or copied or " "cloned into the container. Extensions are installed and run inside the " @@ -667,24 +667,25 @@ msgstr "" "실행되며, 도구, 플랫폼 및 파일 시스템에 완전한 접근 권한을 갖습니다. 이는 다른 컨테이너에 연결하는 것만으로 전체 개발 환경을 " "원활하게 전환할 수 있음을 의미합니다." -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:11 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:22 msgid "" "Source: `Official VSCode documentation " "`_" msgstr "출처 : 공식 VSCode 문서" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:15 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:26 msgid "Getting started" msgstr "시작하기" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:17 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:28 +#, fuzzy msgid "" -"Configuring and setting up the :code:`Dockerfile` as well the " -"configuration for the devcontainer can be a bit more involved. The good " -"thing is you don't have to do it. 
Usually it should be enough to install " -"`Docker `_ on your system and " -"ensure its available on your command line. Additionally, install the " -"`VSCode Containers Extension `_ on your system and ensure its" +" available on your command line. Additionally, install the `VSCode " +"Containers Extension `_." msgstr "" "`Dockerfile`을 설정하고 구성하는 것과 개발 컨테이너 구성은 약간 복잡할 수 있습니다. 다행히도, 이를 직접 할 필요는 " @@ -692,7 +693,7 @@ msgstr "" "설치하고 커맨드 라인에서 사용할 수 있는지 확인하는 것으로 충분합니다. 추가로 `VSCode Containers Extension " "`_을 설치하세요." -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:19 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:35 msgid "" "Now you should be good to go. When starting VSCode, it will ask you to " "run in the container environment and - if you confirm - automatically " @@ -705,13 +706,13 @@ msgstr "" " 것입니다. VSCode에 수동으로 개발 컨테이너를 사용하도록 지시하려면, 확장을 설치한 후, VSCode 창의 왼쪽 하단에 있는 " "초록색 부을 클릭하고 *(Re)Open Folder in Container* 옵션을 선택하세요." -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:21 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:41 msgid "" "In some cases your setup might be more involved. For those cases consult " "the following sources:" msgstr "경우에 따라 설정이 더 복잡할 수도 있습니다. 이러한 경우에는 다음 소스를 참조하세요:" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:23 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:44 msgid "" "`Developing inside a Container " "`_" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:24 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:46 msgid "" "`Remote development in Containers " "`_" @@ -748,13 +749,13 @@ msgstr "" "다음, 재설치하세요(``poetry 설치``이전에 ``poetry.lock`` (``rm poetry.lock``)를 제거하는 것을" " 잊지 마세요)." 
-#: ../../source/contributor-how-to-install-development-versions.rst:12 +#: ../../source/contributor-how-to-install-development-versions.rst:14 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (without " "extras)" msgstr "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (extras 제외)" -#: ../../source/contributor-how-to-install-development-versions.rst:13 +#: ../../source/contributor-how-to-install-development-versions.rst:15 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true, extras = " "[\"simulation\"] }`` (with extras)" @@ -762,17 +763,17 @@ msgstr "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true, extras = " "[\"simulation\"] }`` (extras 포함)" -#: ../../source/contributor-how-to-install-development-versions.rst:15 +#: ../../source/contributor-how-to-install-development-versions.rst:18 msgid "" "Install ``flwr`` from a local copy of the Flower source code via " "``pyproject.toml``:" msgstr "``pyproject.toml``을 통해 Flower 소스 코드의 로컬 복사본에서 ``flwr``을 설치하세요:" -#: ../../source/contributor-how-to-install-development-versions.rst:17 +#: ../../source/contributor-how-to-install-development-versions.rst:20 msgid "``flwr = { path = \"../../\", develop = true }`` (without extras)" msgstr "``flwr = { path = \"../../\", develop = true }`` (extras 제외)" -#: ../../source/contributor-how-to-install-development-versions.rst:18 +#: ../../source/contributor-how-to-install-development-versions.rst:21 msgid "" "``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " "}`` (with extras)" @@ -780,11 +781,11 @@ msgstr "" "``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " "}`` (extras 포함)" -#: ../../source/contributor-how-to-install-development-versions.rst:20 +#: ../../source/contributor-how-to-install-development-versions.rst:23 msgid "Install ``flwr`` from a local wheel file via ``pyproject.toml``:" msgstr "``pyproject.toml``을 통해 로컬 wheel file에서 ``flwr``을 설치하세요:" -#: 
../../source/contributor-how-to-install-development-versions.rst:22 +#: ../../source/contributor-how-to-install-development-versions.rst:25 msgid "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (without" " extras)" @@ -792,7 +793,7 @@ msgstr "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (extras " "제외)" -#: ../../source/contributor-how-to-install-development-versions.rst:23 +#: ../../source/contributor-how-to-install-development-versions.rst:26 msgid "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\", extras = " "[\"simulation\"] }`` (with extras)" @@ -800,7 +801,7 @@ msgstr "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\", extras = " "[\"simulation\"] }`` (extras 포함)" -#: ../../source/contributor-how-to-install-development-versions.rst:25 +#: ../../source/contributor-how-to-install-development-versions.rst:29 msgid "" "Please refer to the Poetry documentation for further details: `Poetry " "Dependency Specification `_" -#: ../../source/contributor-how-to-install-development-versions.rst:28 +#: ../../source/contributor-how-to-install-development-versions.rst:33 msgid "Using pip (recommended on Colab)" msgstr "pip 사용하기(Colab에서 권장)" -#: ../../source/contributor-how-to-install-development-versions.rst:30 +#: ../../source/contributor-how-to-install-development-versions.rst:35 msgid "Install a ``flwr`` pre-release from PyPI:" msgstr "PyPI에서 ``flwr`` 사전 릴리즈를 설치하기:" -#: ../../source/contributor-how-to-install-development-versions.rst:32 +#: ../../source/contributor-how-to-install-development-versions.rst:37 msgid "``pip install -U --pre flwr`` (without extras)" msgstr "``pip install -U --pre flwr`` (extras 제외)" -#: ../../source/contributor-how-to-install-development-versions.rst:33 +#: ../../source/contributor-how-to-install-development-versions.rst:38 msgid "``pip install -U --pre 'flwr[simulation]'`` (with extras)" msgstr "``pip install -U --pre 'flwr[simulation]'`` (extras 포함)" -#: 
../../source/contributor-how-to-install-development-versions.rst:35 +#: ../../source/contributor-how-to-install-development-versions.rst:40 msgid "" "Python packages can be installed from git repositories. Use one of the " "following commands to install the Flower directly from GitHub." @@ -833,17 +834,17 @@ msgstr "" "Python 패키지는 git 저장소에서 설치할 수 있습니다. 다음 명령어 중 하나를 사용하여 GitHub에서 직접 Flower를 " "설치하세요." -#: ../../source/contributor-how-to-install-development-versions.rst:37 +#: ../../source/contributor-how-to-install-development-versions.rst:43 msgid "Install ``flwr`` from the default GitHub branch (``main``):" msgstr "기본 GitHub branch (``main``)에서 ``flwr`` 를 설치하기:" -#: ../../source/contributor-how-to-install-development-versions.rst:39 +#: ../../source/contributor-how-to-install-development-versions.rst:45 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git`` (without " "extras)" msgstr "``pip install flwr@git+https://github.com/adap/flower.git`` (extras 제외)" -#: ../../source/contributor-how-to-install-development-versions.rst:40 +#: ../../source/contributor-how-to-install-development-versions.rst:46 msgid "" "``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" " (with extras)" @@ -851,11 +852,11 @@ msgstr "" "``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" " (extras 포함)" -#: ../../source/contributor-how-to-install-development-versions.rst:42 +#: ../../source/contributor-how-to-install-development-versions.rst:49 msgid "Install ``flwr`` from a specific GitHub branch (``branch-name``):" msgstr "특정 GitHub branch (``branch-name``)에서 ``flwr`` 설치하기:" -#: ../../source/contributor-how-to-install-development-versions.rst:44 +#: ../../source/contributor-how-to-install-development-versions.rst:51 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " "(without extras)" @@ -863,7 +864,7 @@ msgstr "" "``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " "(extras 
제외)" -#: ../../source/contributor-how-to-install-development-versions.rst:45 +#: ../../source/contributor-how-to-install-development-versions.rst:53 msgid "" "``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git" "@branch-name'`` (with extras)" @@ -871,11 +872,11 @@ msgstr "" "``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git" "@branch-name'`` (extras 포함)" -#: ../../source/contributor-how-to-install-development-versions.rst:49 +#: ../../source/contributor-how-to-install-development-versions.rst:57 msgid "Open Jupyter Notebooks on Google Colab" msgstr "Google Colab에서 Jupyter Notebooks 열기" -#: ../../source/contributor-how-to-install-development-versions.rst:51 +#: ../../source/contributor-how-to-install-development-versions.rst:59 msgid "" "Open the notebook ``doc/source/tutorial-series-get-started-with-flower-" "pytorch.ipynb``:" @@ -883,7 +884,7 @@ msgstr "" "``doc/source/tutorial-series-get-started-with-flower-" "pytorch.ipynb``notebook을 엽니다:" -#: ../../source/contributor-how-to-install-development-versions.rst:53 +#: ../../source/contributor-how-to-install-development-versions.rst:61 msgid "" "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" "/tutorial-series-get-started-with-flower-pytorch.ipynb" @@ -891,7 +892,7 @@ msgstr "" "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" "/tutorial-series-get-started-with-flower-pytorch.ipynb" -#: ../../source/contributor-how-to-install-development-versions.rst:55 +#: ../../source/contributor-how-to-install-development-versions.rst:63 msgid "" "Open a development version of the same notebook from branch `branch-name`" " by changing ``main`` to ``branch-name`` (right after ``blob``):" @@ -899,7 +900,7 @@ msgstr "" "``main``을 ``branch-name``(``blob`` 바로 뒤)으로 변경하여 동일한 notebook의 개발 버전을 브랜치 " "`branch-name`에서 엽니다 :" -#: ../../source/contributor-how-to-install-development-versions.rst:57 +#: 
../../source/contributor-how-to-install-development-versions.rst:66 msgid "" "https://colab.research.google.com/github/adap/flower/blob/branch-" "name/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb" @@ -907,21 +908,21 @@ msgstr "" "https://colab.research.google.com/github/adap/flower/blob/branch-" "name/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb" -#: ../../source/contributor-how-to-install-development-versions.rst:59 +#: ../../source/contributor-how-to-install-development-versions.rst:68 msgid "Install a `whl` on Google Colab:" msgstr "Google Colab에서 `whl` 설치하기:" -#: ../../source/contributor-how-to-install-development-versions.rst:61 +#: ../../source/contributor-how-to-install-development-versions.rst:70 msgid "" "In the vertical icon grid on the left hand side, select ``Files`` > " "``Upload to session storage``" msgstr "왼쪽의 수직 아이콘 그리드에서 ``Files`` > ``Upload to session storage``를 선택하세요" -#: ../../source/contributor-how-to-install-development-versions.rst:62 +#: ../../source/contributor-how-to-install-development-versions.rst:72 msgid "Upload the whl (e.g., ``flwr-1.8.0-py3-none-any.whl``)" msgstr "whl (예:``flwr-1.8.0-py3-none-any.whl``)을 업로드하세요" -#: ../../source/contributor-how-to-install-development-versions.rst:63 +#: ../../source/contributor-how-to-install-development-versions.rst:73 msgid "" "Change ``!pip install -q 'flwr[simulation]' torch torchvision " "matplotlib`` to ``!pip install -q 'flwr-1.8.0-py3-none-" @@ -941,11 +942,11 @@ msgid "" "change in the future." msgstr "이 문서는 현재 릴리즈 과정을 설명합니다. 이는 앞으로 변경될 수도 있습니다." -#: ../../source/contributor-how-to-release-flower.rst:7 +#: ../../source/contributor-how-to-release-flower.rst:8 msgid "During the release" msgstr "릴리즈 동안에" -#: ../../source/contributor-how-to-release-flower.rst:9 +#: ../../source/contributor-how-to-release-flower.rst:10 msgid "" "The version number of a release is stated in ``pyproject.toml``. 
To " "release a new version of Flower, the following things need to happen (in " @@ -954,7 +955,7 @@ msgstr "" "릴리즈의 버전 번호는 ``pyproject.toml``에 명시되어 있습니다. Flower의 새 버전을 릴리즈하려면 다음 작업이 " "순서대로 수행되어야 합니다:" -#: ../../source/contributor-how-to-release-flower.rst:11 +#: ../../source/contributor-how-to-release-flower.rst:13 msgid "" "Run ``python3 src/py/flwr_tool/update_changelog.py `` in " "order to add every new change to the changelog (feel free to make manual " @@ -964,7 +965,7 @@ msgstr "" "src/py/flwr_tool/update_changelog.py ``을 실행합니다 (변경 로그가 " "만족스러워질 때까지 수동으로 변경해도 됩니다)." -#: ../../source/contributor-how-to-release-flower.rst:12 +#: ../../source/contributor-how-to-release-flower.rst:16 msgid "" "Once the changelog has been updated with all the changes, run ``./dev" "/prepare-release-changelog.sh v``, where ```` " @@ -978,7 +979,7 @@ msgstr "" "버전 번호입니다 (앞에 ``v``가 추가된 것을 주의하세요). 이 명령어는 변경 로그의 ``Unreleased``헤더를 해당 버전과" " 현재 날짜로 교체하고, 기여자들에게 감사 메시지가 추가됩니다. 이러한 변경 사항으로 pull request합니다." -#: ../../source/contributor-how-to-release-flower.rst:13 +#: ../../source/contributor-how-to-release-flower.rst:22 msgid "" "Once the pull request is merged, tag the release commit with the version " "number as soon as the PR is merged: ``git tag v`` (notice " @@ -990,93 +991,93 @@ msgstr "" "v`` (버전 번호 앞에 ``v``가 추가된 것을 확인), 그 다음 ``git push --tags``. " "이렇게 하면 올바른 아티팩트와 변경 로그의 관련 부분이 포함된 초안 릴리즈가 GitHub에 생성됩니다." -#: ../../source/contributor-how-to-release-flower.rst:14 +#: ../../source/contributor-how-to-release-flower.rst:26 msgid "Check the draft release on GitHub, and if everything is good, publish it." msgstr "GitHub에서 릴리즈 초안을 확인하고, 모든 것이 양호하면 게시하세요." 
-#: ../../source/contributor-how-to-release-flower.rst:17 +#: ../../source/contributor-how-to-release-flower.rst:29 msgid "After the release" msgstr "릴리즈 후에" -#: ../../source/contributor-how-to-release-flower.rst:19 +#: ../../source/contributor-how-to-release-flower.rst:31 msgid "Create a pull request which contains the following changes:" msgstr "다음 변경 사항이 포함된 pull request를 만듭니다:" -#: ../../source/contributor-how-to-release-flower.rst:21 +#: ../../source/contributor-how-to-release-flower.rst:33 msgid "Increase the minor version in ``pyproject.toml`` by one." msgstr "``pyproject.toml``의 마이너 버전을 하나씩 늘립니다." -#: ../../source/contributor-how-to-release-flower.rst:22 +#: ../../source/contributor-how-to-release-flower.rst:34 msgid "Update all files which contain the current version number if necessary." msgstr "필요한 경우 현재 버전 번호가 포함된 모든 파일을 업데이트합니다." -#: ../../source/contributor-how-to-release-flower.rst:23 +#: ../../source/contributor-how-to-release-flower.rst:35 msgid "Add a new ``Unreleased`` section in ``changelog.md``." msgstr "``changelog.md``에 ``Unreleased`` 섹션을 새로 추가합니다." -#: ../../source/contributor-how-to-release-flower.rst:25 +#: ../../source/contributor-how-to-release-flower.rst:37 msgid "" "Merge the pull request on the same day (i.e., before a new nightly " "release gets published to PyPI)." msgstr "pull request를 같은 날(즉, 새로운 nightly 릴리즈가 PyPI에 게시되기 전에) 병합하세요." -#: ../../source/contributor-how-to-release-flower.rst:28 +#: ../../source/contributor-how-to-release-flower.rst:41 msgid "Publishing a pre-release" msgstr "사전 릴리즈 게시" -#: ../../source/contributor-how-to-release-flower.rst:31 +#: ../../source/contributor-how-to-release-flower.rst:44 msgid "Pre-release naming" msgstr "사전 릴리즈 이름" -#: ../../source/contributor-how-to-release-flower.rst:33 +#: ../../source/contributor-how-to-release-flower.rst:46 msgid "" "PyPI supports pre-releases (alpha, beta, release candidate). 
Pre-releases" " MUST use one of the following naming patterns:" msgstr "PyPI는 사전 릴리즈(알파, 베타, 릴리스 후보)를 지원합니다. 사전 릴리즈는 반드시 다음 명명 패턴 중 하나를 사용해야 합니다:" -#: ../../source/contributor-how-to-release-flower.rst:35 +#: ../../source/contributor-how-to-release-flower.rst:49 msgid "Alpha: ``MAJOR.MINOR.PATCHaN``" msgstr "Alpha: ``MAJOR.MINOR.PATCHaN``" -#: ../../source/contributor-how-to-release-flower.rst:36 +#: ../../source/contributor-how-to-release-flower.rst:50 msgid "Beta: ``MAJOR.MINOR.PATCHbN``" msgstr "Beta: ``MAJOR.MINOR.PATCHbN``" -#: ../../source/contributor-how-to-release-flower.rst:37 +#: ../../source/contributor-how-to-release-flower.rst:51 msgid "Release candidate (RC): ``MAJOR.MINOR.PATCHrcN``" msgstr "Release candidate (RC): ``MAJOR.MINOR.PATCHrcN``" -#: ../../source/contributor-how-to-release-flower.rst:39 +#: ../../source/contributor-how-to-release-flower.rst:53 msgid "Examples include:" msgstr "예시:" -#: ../../source/contributor-how-to-release-flower.rst:41 +#: ../../source/contributor-how-to-release-flower.rst:55 msgid "``1.0.0a0``" msgstr "``1.0.0a0``" -#: ../../source/contributor-how-to-release-flower.rst:42 +#: ../../source/contributor-how-to-release-flower.rst:56 msgid "``1.0.0b0``" msgstr "``1.0.0b0``" -#: ../../source/contributor-how-to-release-flower.rst:43 +#: ../../source/contributor-how-to-release-flower.rst:57 msgid "``1.0.0rc0``" msgstr "``1.0.0rc0``" -#: ../../source/contributor-how-to-release-flower.rst:44 +#: ../../source/contributor-how-to-release-flower.rst:58 msgid "``1.0.0rc1``" msgstr "``1.0.0rc1``" -#: ../../source/contributor-how-to-release-flower.rst:46 +#: ../../source/contributor-how-to-release-flower.rst:60 msgid "" "This is in line with PEP-440 and the recommendations from the Python " "Packaging Authority (PyPA):" msgstr "이는 PEP-440 및 Python Packaging Authority (PyPA)의 권장 사항과 일치합니다:" -#: ../../source/contributor-how-to-release-flower.rst:49 +#: ../../source/contributor-how-to-release-flower.rst:63 msgid "`PEP-440 `_" msgstr 
"`PEP-440 `_" -#: ../../source/contributor-how-to-release-flower.rst:50 +#: ../../source/contributor-how-to-release-flower.rst:64 msgid "" "`PyPA Choosing a versioning scheme " "`_" -#: ../../source/contributor-how-to-release-flower.rst:52 +#: ../../source/contributor-how-to-release-flower.rst:67 msgid "" "Note that the approach defined by PyPA is not compatible with SemVer " "2.0.0 spec, for details consult the `Semantic Versioning Specification " @@ -1096,15 +1097,15 @@ msgstr "" "관리 사양 `_ (특히 항목 11이 " "우선순위)을 참조하세요." -#: ../../source/contributor-how-to-release-flower.rst:55 +#: ../../source/contributor-how-to-release-flower.rst:73 msgid "Pre-release classification" msgstr "사전 릴리즈 분류" -#: ../../source/contributor-how-to-release-flower.rst:57 +#: ../../source/contributor-how-to-release-flower.rst:75 msgid "Should the next pre-release be called alpha, beta, or release candidate?" msgstr "다음 사전 릴리즈를 알파, 베타 또는 릴리스 후보라고 불러야 하나요?" -#: ../../source/contributor-how-to-release-flower.rst:59 +#: ../../source/contributor-how-to-release-flower.rst:77 msgid "" "RC: feature complete, no known issues (apart from issues that are " "classified as \"won't fix\" for the next stable release) - if no issues " @@ -1113,11 +1114,11 @@ msgstr "" "RC: 기능 완료, 알려진 문제 없음(다음 stable 릴리즈에서 \"수정되지 않음\"으로 분류된 문제 제외) - 문제가 나타나지 " "않으면 다음 stable 릴리즈가 됩니다" -#: ../../source/contributor-how-to-release-flower.rst:60 +#: ../../source/contributor-how-to-release-flower.rst:80 msgid "Beta: feature complete, allowed to have known issues" msgstr "베타: 기능 완료, 알려진 문제 발생 가능" -#: ../../source/contributor-how-to-release-flower.rst:61 +#: ../../source/contributor-how-to-release-flower.rst:81 msgid "Alpha: not feature complete, allowed to have known issues" msgstr "알파: 기능 미완성, 알려진 문제가 있을 수 있음" @@ -1135,12 +1136,12 @@ msgstr "" "가상 환경 내에서 파이썬 설정을 실행하는 것이 좋습니다. 이 가이드에서는 pyenv virtualenv, poetry 또는 " "Anaconda를 사용하여 가상 환경을 만드는 세 가지 예제를 보여줍니다. 안내를 따르거나 원하는 설정을 선택할 수 있습니다." 
-#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:9 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:10 msgid "Python Version" msgstr "Python 버전" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:11 -#: ../../source/how-to-install-flower.rst:8 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:12 +#: ../../source/how-to-install-flower.rst:7 msgid "" "Flower requires at least `Python 3.9 `_, " "but `Python 3.10 `_ or above is " @@ -1149,7 +1150,7 @@ msgstr "" "Flower는 `Python 3.9 `_이상이 필요하지만, `Python " "3.10 `_이상을 권장합니다." -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:14 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:17 msgid "" "Due to a known incompatibility with `ray " "`_, we currently recommend utilizing at " @@ -1160,11 +1161,11 @@ msgstr "" " 시뮬레이션을 실행할 때는 최대 `Python 3.11 `_을 사용하는 것이" " 좋습니다." -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:19 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:22 msgid "Virtualenv with Pyenv/Virtualenv" msgstr "Pyenv/Virtualenv를 사용한 가상 환경" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:21 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:24 msgid "" "One of the recommended virtual environment is `pyenv " "`_/`virtualenv `_입니다. 자세한 내용은 `Flower " "examples `_를 참조하세요." 
-#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:23 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 msgid "" "Once Pyenv is set up, you can use it to install `Python Version 3.10 " "`_ or above:" @@ -1183,19 +1184,19 @@ msgstr "" "Pyenv가 설정되면 이를 사용하여 'Python 버전 3.10 `_ " "이상'을 설치할 수 있습니다:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 msgid "Create the virtualenv with:" msgstr "가상 환경을 만듭니다:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:42 msgid "Activate the virtualenv by running the following command:" msgstr "다음 명령을 실행하여 가상 환경을 활성화합니다:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:44 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:49 msgid "Virtualenv with Poetry" msgstr "Poetry를 사용한 가상 환경" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:46 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:51 msgid "" "The Flower examples are based on `Poetry `_ to manage dependencies. After installing Poetry you " @@ -1204,21 +1205,22 @@ msgstr "" "Flower examples은 의존성을 관리하기 위해 `Poetry `_를 기반으로 합니다. 
Poetry를 설치한 후 가상 환경을 생성하기만 하면 됩니다:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:52 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:58 msgid "" "If you open a new terminal you can activate the previously created " "virtual environment with the following command:" msgstr "새 터미널을 열면 다음 명령을 사용하여 이전에 생성한 가상 환경을 활성화할 수 있습니다:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:60 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:66 msgid "Virtualenv with Anaconda" msgstr "Anaconda를 사용한 가상 환경" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:62 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 +#, fuzzy msgid "" "If you prefer to use Anaconda for your virtual environment then install " "and setup the `conda `_ package. After setting it up you can " +"/user-guide/install/index.html>`_ package. After setting it up you can " "create a virtual environment with:" msgstr "" "가상 환경에서 Anaconda를 사용하려면 `conda " @@ -1226,15 +1228,15 @@ msgstr "" "guide/install/index.html>`_ 패키지를 설치 및 설정하세요. 설정 후 다음을 사용하여 가상 환경을 만들 수 " "있습니다:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 msgid "and activate the virtual environment with:" msgstr "그 후 가상 환경을 활성화합니다:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:83 msgid "And then?" msgstr "그다음은?" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:78 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:85 msgid "" "As soon as you created your virtual environment you clone one of the " "`Flower examples `_." 
@@ -1246,11 +1248,11 @@ msgstr "" msgid "Write documentation" msgstr "문서 작성" -#: ../../source/contributor-how-to-write-documentation.rst:6 +#: ../../source/contributor-how-to-write-documentation.rst:5 msgid "Project layout" msgstr "프로젝트 레이아웃" -#: ../../source/contributor-how-to-write-documentation.rst:8 +#: ../../source/contributor-how-to-write-documentation.rst:7 msgid "" "The Flower documentation lives in the ``doc`` directory. The Sphinx-based" " documentation system supports both reStructuredText (``.rst`` files) and" @@ -1260,7 +1262,7 @@ msgstr "" "텍스트(``.rst`` 파일)와 Markdown(``.md`` 파일)을 모두 지원합니다." #: ../../source/contributor-how-to-write-documentation.rst:10 -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:169 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:193 msgid "" "Note that, in order to build the documentation locally (with ``poetry run" " make html``, like described below), `Pandoc " @@ -1270,20 +1272,20 @@ msgstr "" "로컬에서 문서를 작성하려면(아래 설명과 같이 ``poetry run make html``로) `Pandoc " "`_이 시스템에 설치되어 있어야 합니다." 
-#: ../../source/contributor-how-to-write-documentation.rst:14 +#: ../../source/contributor-how-to-write-documentation.rst:15 msgid "Edit an existing page" msgstr "기존 페이지 편집" -#: ../../source/contributor-how-to-write-documentation.rst:16 +#: ../../source/contributor-how-to-write-documentation.rst:17 msgid "Edit an existing ``.rst`` (or ``.md``) file under ``doc/source/``" msgstr "doc/source/``에서 기존 ``.rst``(또는 ``.md``) 파일을 편집합니다" -#: ../../source/contributor-how-to-write-documentation.rst:17 +#: ../../source/contributor-how-to-write-documentation.rst:18 #: ../../source/contributor-how-to-write-documentation.rst:27 msgid "Compile the docs: ``cd doc``, then ``poetry run make html``" msgstr "문서를 컴파일합니다: ``cd doc``, ``poetry run make html`` 순으로 컴파일합니다" -#: ../../source/contributor-how-to-write-documentation.rst:18 +#: ../../source/contributor-how-to-write-documentation.rst:19 #: ../../source/contributor-how-to-write-documentation.rst:28 msgid "Open ``doc/build/html/index.html`` in the browser to check the result" msgstr "브라우저에서 ``doc/build/html/index.html``을 열어 결과를 확인합니다" @@ -1318,11 +1320,11 @@ msgstr "" "Flower에 대한 기여를 환영합니다! 하지만 어디서부터 시작해야 할지 알기란 쉽지 않습니다. 그래서 저희는 여러분의 PR이 " "Flower 코드베이스에 채택될 가능성을 높이기 위해 어디서부터 시작해야 하는지 몇 가지 권장 사항을 정리해 보았습니다." -#: ../../source/contributor-ref-good-first-contributions.rst:11 +#: ../../source/contributor-ref-good-first-contributions.rst:9 msgid "Where to start" msgstr "시작 위치" -#: ../../source/contributor-ref-good-first-contributions.rst:13 +#: ../../source/contributor-ref-good-first-contributions.rst:11 msgid "" "Until the Flower core library matures it will be easier to get PR's " "accepted if they only touch non-core areas of the codebase. Good " @@ -1331,23 +1333,23 @@ msgstr "" "Flower 코어 라이브러리가 완성될 때까지는 코드베이스의 비핵심 영역만 건드리는 것이 PR을 승인받기가 더 쉬울 것입니다. 
" "시작하기에 좋은 후보자는 다음과 같습니다:" -#: ../../source/contributor-ref-good-first-contributions.rst:17 +#: ../../source/contributor-ref-good-first-contributions.rst:14 msgid "Documentation: What's missing? What could be expressed more clearly?" msgstr "문서: 무엇이 누락되었나요? 무엇을 더 명확하게 표현할 수 있을까요?" -#: ../../source/contributor-ref-good-first-contributions.rst:18 +#: ../../source/contributor-ref-good-first-contributions.rst:15 msgid "Baselines: See below." msgstr "Baselines: 아래를 참조하세요." -#: ../../source/contributor-ref-good-first-contributions.rst:19 +#: ../../source/contributor-ref-good-first-contributions.rst:16 msgid "Examples: See below." msgstr "예시: 아래를 참조하세요." -#: ../../source/contributor-ref-good-first-contributions.rst:23 +#: ../../source/contributor-ref-good-first-contributions.rst:19 msgid "Request for Flower Baselines" msgstr "Flower Baselines 요청" -#: ../../source/contributor-ref-good-first-contributions.rst:25 +#: ../../source/contributor-ref-good-first-contributions.rst:21 msgid "" "If you are not familiar with Flower Baselines, you should probably check-" "out our `contributing guide for baselines " @@ -1357,7 +1359,7 @@ msgstr "" "`_를 " "확인해보세요." -#: ../../source/contributor-ref-good-first-contributions.rst:27 +#: ../../source/contributor-ref-good-first-contributions.rst:25 msgid "" "You should then check out the open `issues " "`_" @@ -1370,7 +1372,7 @@ msgstr "" " baseline 요청을 확인해야 합니다. 작업하고 싶은 기준선을 찾았지만 담당자가 없는 경우, 자유롭게 자신에게 할당하고 작업을 " "시작하세요!" -#: ../../source/contributor-ref-good-first-contributions.rst:31 +#: ../../source/contributor-ref-good-first-contributions.rst:30 msgid "" "Otherwise, if you don't find a baseline you'd like to work on, be sure to" " open a new issue with the baseline request template!" @@ -1416,12 +1418,13 @@ msgstr "" "때문에 다이어그램과 추상화가 실제로는 정확하지 않을 수 있습니다. SecAgg 프로토콜은 SecAgg+ 프로토콜의 특수한 경우로 " "간주할 수 있습니다." 
-#: ../../source/contributor-ref-secure-aggregation-protocols.rst:8 -msgid "The :code:`SecAgg+` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:9 +#, fuzzy +msgid "The ``SecAgg+`` abstraction" msgstr "The :code:`SecAgg+` 추상화" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:10 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:11 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:163 msgid "" "In this implementation, each client will be assigned with a unique index " "(int) for secure aggregation, and thus many python dictionaries used have" @@ -1430,18 +1433,19 @@ msgstr "" "구현에서는 각 클라이언트에 secure aggregation를 위한 고유 인덱스(int)가 할당되므로 사용되는 많은 파이썬 " "dictionaries에는 ClientProxy 타입이 아닌 int 타입의 키가 있습니다." -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:65 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:198 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:67 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:204 msgid "" "The Flower server will execute and process received results in the " "following order:" msgstr "Flower 서버는 수신된 결과를 다음 순서로 실행하고 처리합니다:" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:159 -msgid "The :code:`LightSecAgg` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +#, fuzzy +msgid "The ``LightSecAgg`` abstraction" msgstr "The :code:`LightSecAgg` 추상" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:271 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:277 msgid "Types" msgstr "타입" @@ -1455,7 +1459,7 @@ msgid "" "are not used to contributing to GitHub projects." msgstr "이 가이드는 Flower에 참여하고 싶지만 GitHub 프로젝트에 기여하는 데 익숙하지 않은 분들을 위한 것입니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:6 +#: ../../source/contributor-tutorial-contribute-on-github.rst:7 msgid "" "If you're familiar with how contributing on GitHub works, you can " "directly checkout our :doc:`getting started guide for contributors " @@ -1464,15 +1468,15 @@ msgstr "" "깃허브에서 기여하는 방식에 익숙하다면 :doc:`기여자를 위한 시작 가이드`를 직접 확인하세요." -#: ../../source/contributor-tutorial-contribute-on-github.rst:10 +#: ../../source/contributor-tutorial-contribute-on-github.rst:12 msgid "Setting up the repository" msgstr "레포지토리 설정하기" -#: ../../source/contributor-tutorial-contribute-on-github.rst:21 +#: ../../source/contributor-tutorial-contribute-on-github.rst:29 msgid "**Create a GitHub account and setup Git**" msgstr "**GitHub 계정을 만들고 Git을 설정합니다**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:13 +#: ../../source/contributor-tutorial-contribute-on-github.rst:15 msgid "" "Git is a distributed version control tool. This allows for an entire " "codebase's history to be stored and every developer's machine. It is a " @@ -1484,7 +1488,7 @@ msgstr "" "컴퓨터에 설치해야 하는 소프트웨어로, 이 `가이드 `_를 따라 설정할 수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:16 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "" "GitHub, itself, is a code hosting platform for version control and " "collaboration. It allows for everyone to collaborate and work from " @@ -1493,13 +1497,13 @@ msgstr "" "GitHub는 그 자체로 버전 관리 및 협업을 위한 코드 호스팅 플랫폼입니다. 누구나 원격 레포지토리에서 어디서든 협업하고 작업할 " "수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:18 +#: ../../source/contributor-tutorial-contribute-on-github.rst:25 msgid "" "If you haven't already, you will need to create an account on `GitHub " "`_." msgstr "아직 계정을 만들지 않았다면 `GitHub `_에서 계정을 만들어야 합니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:20 +#: ../../source/contributor-tutorial-contribute-on-github.rst:28 msgid "" "The idea behind the generic Git and GitHub workflow boils down to this: " "you download code from a remote repository on GitHub, make changes " @@ -1509,14 +1513,15 @@ msgstr "" "일반적인 Git 및 GitHub 워크플로우의 기본 개념은 다음과 같이 요약됩니다. GitHub의 원격 레포지토리에서 코드를 " "다운로드하고 로컬에서 변경한 후 Git을 사용하여 추적한 다음 새 기록을 다시 GitHub에 업로드하는 것입니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:32 +#: ../../source/contributor-tutorial-contribute-on-github.rst:42 msgid "**Forking the Flower repository**" msgstr "**Flower 레포지토리 포크하기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:24 +#: ../../source/contributor-tutorial-contribute-on-github.rst:32 +#, fuzzy msgid "" "A fork is a personal copy of a GitHub repository. To create one for " -"Flower, you must navigate to ``_ (while " +"Flower, you must navigate to https://github.com/adap/flower (while " "connected to your GitHub account) and click the ``Fork`` button situated " "on the top right of the page." msgstr "" @@ -1524,7 +1529,7 @@ msgstr "" "``_로 이동하여(GitHub 계정에 연결된 상태에서) 페이지 오른쪽 " "상단에 있는 ``포크`` 버튼을 클릭해야 합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:29 +#: ../../source/contributor-tutorial-contribute-on-github.rst:38 msgid "" "You can change the name if you want, but this is not necessary as this " "version of Flower will be yours and will sit inside your own account " @@ -1534,11 +1539,11 @@ msgstr "" "원하는 경우 이름을 변경할 수 있지만, 이 버전의 Flower는 자신의 계정(즉, 자신의 리포지토리 목록)에 위치하게 되므로 변경할" " 필요는 없습니다. 만들기가 완료되면 왼쪽 상단에Flower 버전이 표시되는 것을 볼 수 있습니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 msgid "**Cloning your forked repository**" msgstr "**포크된 레포지토리 클론하기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:35 +#: ../../source/contributor-tutorial-contribute-on-github.rst:45 msgid "" "The next step is to download the forked repository on your machine to be " "able to make changes to it. On your forked repository page, you should " @@ -1548,27 +1553,27 @@ msgstr "" "다음 단계는 컴퓨터에서 포크된 레포지토리를 변경할 수 있도록 다운로드하는 것입니다. 포크된 포지토리 페이지에서 먼저 오른쪽의 " "``Code`` 버튼을 클릭하면 레포지토리의 HTTPS 링크를 복사할 수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:41 +#: ../../source/contributor-tutorial-contribute-on-github.rst:52 msgid "" "Once you copied the \\, you can open a terminal on your machine, " "navigate to the place you want to download the repository to and type:" msgstr "\\를 복사한 후에는 컴퓨터에서 터미널을 열고 레포지토리를 다운로드할 위치로 이동하여 입력하면 됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 msgid "" "This will create a ``flower/`` (or the name of your fork if you renamed " "it) folder in the current working directory." msgstr "현재 작업 디렉터리에``flower/``(또는 포크 이름을 변경한 경우 포크 이름) 폴더가 생성됩니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:66 +#: ../../source/contributor-tutorial-contribute-on-github.rst:78 msgid "**Add origin**" msgstr "**origin 추가**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:50 +#: ../../source/contributor-tutorial-contribute-on-github.rst:62 msgid "You can then go into the repository folder:" msgstr "그런 다음 레포지토리 폴더로 이동할 수 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:56 +#: ../../source/contributor-tutorial-contribute-on-github.rst:68 msgid "" "And here we will need to add an origin to our repository. The origin is " "the \\ of the remote fork repository. 
To obtain it, we can do as " @@ -1578,27 +1583,27 @@ msgstr "" "여기에 레포지토리에 origin을 추가해야 합니다. origin은 원격 포크 레포지토리의 \\입니다. origin을 " "얻으려면 앞서 설명한 대로 GitHub 계정의 포크 레포지토리로 이동하여 링크를 복사하면 됩니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:61 +#: ../../source/contributor-tutorial-contribute-on-github.rst:75 msgid "" "Once the \\ is copied, we can type the following command in our " "terminal:" msgstr "\\ 이 복사되면 터미널에 다음 명령을 입력하면 됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:90 +#: ../../source/contributor-tutorial-contribute-on-github.rst:102 msgid "**Add upstream**" msgstr "**Upstream 추가하기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:69 +#: ../../source/contributor-tutorial-contribute-on-github.rst:81 msgid "" "Now we will add an upstream address to our repository. Still in the same " "directory, we must run the following command:" msgstr "이제 레포지토리에 upstream 주소를 추가하겠습니다. 여전히 같은 디렉터리에서 다음 명령을 실행해야 합니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:76 +#: ../../source/contributor-tutorial-contribute-on-github.rst:88 msgid "The following diagram visually explains what we did in the previous steps:" msgstr "다음 다이어그램은 이전 단계에서 수행한 작업을 시각적으로 설명합니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:80 +#: ../../source/contributor-tutorial-contribute-on-github.rst:92 msgid "" "The upstream is the GitHub remote address of the parent repository (in " "this case Flower), i.e. the one we eventually want to contribute to and " @@ -1610,17 +1615,17 @@ msgstr "" " 기록이 필요한 레포지토리입니다. origin은 우리가 만든 포크된 레포지토리의 GitHub 원격 주소, 즉 우리 계정에 있는 " "사본(포크)입니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:84 +#: ../../source/contributor-tutorial-contribute-on-github.rst:97 msgid "" "To make sure our local version of the fork is up-to-date with the latest " "changes from the Flower repository, we can execute the following command:" msgstr "로컬 버전의 포크가 Flower 레포지토리의 최신 변경 사항으로 최신 상태인지 확인하려면 다음 명령을 실행하면 됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:93 +#: ../../source/contributor-tutorial-contribute-on-github.rst:105 msgid "Setting up the coding environment" msgstr "코딩 환경 설정" -#: ../../source/contributor-tutorial-contribute-on-github.rst:95 +#: ../../source/contributor-tutorial-contribute-on-github.rst:107 msgid "" "This can be achieved by following this :doc:`getting started guide for " "contributors ` (note " @@ -1631,50 +1636,50 @@ msgstr "" "as-a-contributor>'를 참조하세요(레포지토리를 복제할 필요는 없습니다). 코드를 작성하고 테스트할 수 있게 되면 드디어" " 변경을 시작할 수 있습니다!" -#: ../../source/contributor-tutorial-contribute-on-github.rst:100 +#: ../../source/contributor-tutorial-contribute-on-github.rst:113 msgid "Making changes" msgstr "변경하기" -#: ../../source/contributor-tutorial-contribute-on-github.rst:102 +#: ../../source/contributor-tutorial-contribute-on-github.rst:115 msgid "" "Before making any changes make sure you are up-to-date with your " "repository:" msgstr "변경하기 전에 레포지토리를 최신 상태로 유지하세요:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:108 +#: ../../source/contributor-tutorial-contribute-on-github.rst:121 msgid "And with Flower's repository:" msgstr "Flower의 레포지토리도 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:122 +#: ../../source/contributor-tutorial-contribute-on-github.rst:134 msgid "**Create a new branch**" msgstr "**새 브랜치 만들기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:115 +#: ../../source/contributor-tutorial-contribute-on-github.rst:128 msgid "" "To make the history cleaner and easier to work with, it is good practice " "to create a new branch 
for each feature/project that needs to be " "implemented." msgstr "히스토리를 더 깔끔하고 작업하기 쉽게 만들려면 구현해야 하는 각 기능/프로젝트에 대해 새 브랜치를 만드는 것이 좋습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:118 +#: ../../source/contributor-tutorial-contribute-on-github.rst:131 msgid "" "To do so, just run the following command inside the repository's " "directory:" msgstr "이렇게 하려면 레포지토리 디렉토리에서 다음 명령을 실행하면 됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:136 msgid "**Make changes**" msgstr "**변경하기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:137 msgid "Write great code and create wonderful changes using your favorite editor!" msgstr "선호하는 편집기를 사용하여 멋진 코드를 작성하고 훌륭한 변화를 만들어 보세요!" -#: ../../source/contributor-tutorial-contribute-on-github.rst:138 +#: ../../source/contributor-tutorial-contribute-on-github.rst:149 msgid "**Test and format your code**" msgstr "**코드 테스트 및 서식 지정**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:128 +#: ../../source/contributor-tutorial-contribute-on-github.rst:139 msgid "" "Don't forget to test and format your code! Otherwise your code won't be " "able to be merged into the Flower repository. This is done so the " @@ -1683,57 +1688,60 @@ msgstr "" "코드를 테스트하고 서식을 지정하는 것을 잊지 마세요! 그렇지 않으면 코드를 Flower 레포지토리에 병합할 수 없습니다. 이는 " "코드베이스가 일관성을 유지하고 이해하기 쉽도록 하기 위한 것입니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:131 +#: ../../source/contributor-tutorial-contribute-on-github.rst:143 msgid "To do so, we have written a few scripts that you can execute:" msgstr "이를 위해 실행할 수 있는 몇 가지 스크립트를 작성했습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:150 +#: ../../source/contributor-tutorial-contribute-on-github.rst:162 msgid "**Stage changes**" msgstr "**변경사항 스테이징**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:141 +#: ../../source/contributor-tutorial-contribute-on-github.rst:152 msgid "" "Before creating a commit that will update your history, you must specify " "to Git which files it needs to take into account." msgstr "기록을 업데이트할 커밋을 만들기 전에 어떤 파일을 고려해야 하는지 Git에 지정해야 합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:143 +#: ../../source/contributor-tutorial-contribute-on-github.rst:155 msgid "This can be done with:" msgstr "이 작업을 수행할 수 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:149 +#: ../../source/contributor-tutorial-contribute-on-github.rst:161 +#, fuzzy msgid "" "To check which files have been modified compared to the last version " "(last commit) and to see which files are staged for commit, you can use " -"the :code:`git status` command." +"the ``git status`` command." msgstr "" "마지막 버전(마지막 커밋)과 비교하여 수정된 파일을 확인하고 커밋을 위해 스테이징된 파일을 확인하려면 :code:`git " "status` 명령을 사용하면 됩니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:160 +#: ../../source/contributor-tutorial-contribute-on-github.rst:173 msgid "**Commit changes**" msgstr "**변경사항 커밋**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:153 +#: ../../source/contributor-tutorial-contribute-on-github.rst:165 +#, fuzzy msgid "" -"Once you have added all the files you wanted to commit using :code:`git " -"add`, you can finally create your commit using this command:" +"Once you have added all the files you wanted to commit using ``git add``," +" you can finally create your commit using this command:" msgstr ":code:`git add`를 사용하여 커밋하려는 모든 파일을 추가한 후, 마지막으로 이 명령을 사용하여 커밋을 생성할 수 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:159 +#: ../../source/contributor-tutorial-contribute-on-github.rst:172 +#, fuzzy msgid "" "The \\ is there to explain to others what the commit " "does. It should be written in an imperative style and be concise. An " -"example would be :code:`git commit -m \"Add images to README\"`." +"example would be ``git commit -m \"Add images to README\"``." msgstr "" "커밋의 내용을 다른 사람에게 설명하기 위해 \\가 있습니다. 명령형 스타일로 작성해야 하며 간결해야" " 합니다. 예를 들면 :code:`git commit -m \"Add images to README\"`." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:171 +#: ../../source/contributor-tutorial-contribute-on-github.rst:185 msgid "**Push the changes to the fork**" msgstr "**변경 사항을 포크에 푸시**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:163 +#: ../../source/contributor-tutorial-contribute-on-github.rst:176 msgid "" "Once we have committed our changes, we have effectively updated our local" " history, but GitHub has no way of knowing this unless we push our " @@ -1742,41 +1750,41 @@ msgstr "" "변경 사항을 커밋하면 로컬 히스토리를 효과적으로 업데이트한 것이지만, 변경 사항을 원본의 원격 주소로 푸시하지 않는 한 " "GitHub는 이를 알 방법이 없습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:170 +#: ../../source/contributor-tutorial-contribute-on-github.rst:184 msgid "" "Once this is done, you will see on the GitHub that your forked repo was " "updated with the changes you have made." msgstr "이 작업이 완료되면 변경한 내용으로 포크된 레포지토리가 업데이트된 것을 GitHub에서 확인할 수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:174 +#: ../../source/contributor-tutorial-contribute-on-github.rst:188 msgid "Creating and merging a pull request (PR)" msgstr "pull request(PR) 만들기 및 병합하기" -#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +#: ../../source/contributor-tutorial-contribute-on-github.rst:226 msgid "**Create the PR**" msgstr "**PR 만들기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:177 +#: ../../source/contributor-tutorial-contribute-on-github.rst:191 msgid "" "Once you have pushed changes, on the GitHub webpage of your repository " "you should see the following message:" msgstr "변경 사항을 푸시하고 나면 레포지토리의 GitHub 웹페이지에 다음 메시지가 표시됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:181 +#: ../../source/contributor-tutorial-contribute-on-github.rst:196 msgid "Otherwise you can always find this option in the ``Branches`` page." msgstr "그렇지 않으면 언제든지 ``Branches`` 페이지에서 이 옵션을 찾을 수 있습니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:183 +#: ../../source/contributor-tutorial-contribute-on-github.rst:198 msgid "" "Once you click the ``Compare & pull request`` button, you should see " "something similar to this:" msgstr "``Compare & pull request`` 버튼을 클릭하면 이와 비슷한 화면이 표시됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:187 +#: ../../source/contributor-tutorial-contribute-on-github.rst:203 msgid "At the top you have an explanation of which branch will be merged where:" msgstr "상단에는 어느 지점이 어디에 병합될 것인지에 대한 설명이 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:191 +#: ../../source/contributor-tutorial-contribute-on-github.rst:207 msgid "" "In this example you can see that the request is to merge the branch " "``doc-fixes`` from my forked repository to branch ``main`` from the " @@ -1785,7 +1793,7 @@ msgstr "" "이 예제에서는 내 포크된 레포지토리의 ``doc-fixes`` 브랜치를 Flower 레포지토리의 ``main`` 브랜치에 병합하라는" " 요청을 볼 수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:193 +#: ../../source/contributor-tutorial-contribute-on-github.rst:210 msgid "" "The title should be changed to adhere to the :ref:`pr_title_format` " "guidelines, otherwise it won't be possible to merge the PR. So in this " @@ -1794,7 +1802,7 @@ msgstr "" "제목은 :ref:`pr_title_format` 가이드라인을 준수하도록 변경해야 하며, 그렇지 않으면 PR을 병합할 수 없습니다. " "따라서 이 경우 올바른 제목은 ``docs(framework:skip) Fix typos``이 될 수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:196 +#: ../../source/contributor-tutorial-contribute-on-github.rst:214 msgid "" "The input box in the middle is there for you to describe what your PR " "does and to link it to existing issues. We have placed comments (that " @@ -1804,11 +1812,11 @@ msgstr "" "가운데에 있는 입력 상자는 PR의 기능을 설명하고 기존 이슈에 연결할 수 있는 곳입니다. 프로세스를 안내하기 위해 코멘트(PR이 " "열리면 렌더링되지 않음)를 배치했습니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:199 +#: ../../source/contributor-tutorial-contribute-on-github.rst:218 msgid "It is important to follow the instructions described in comments." msgstr "코멘트에 설명된 지침을 따르는 것이 중요합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:201 +#: ../../source/contributor-tutorial-contribute-on-github.rst:220 msgid "" "At the bottom you will find the button to open the PR. This will notify " "reviewers that a new PR has been opened and that they should look over it" @@ -1817,94 +1825,94 @@ msgstr "" "하단에는 PR을 여는 버튼이 있습니다. 이렇게 하면 검토자에게 새 PR이 열렸으며 병합하거나 변경을 요청하기 위해 검토해야 함을 " "알립니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:204 +#: ../../source/contributor-tutorial-contribute-on-github.rst:224 msgid "" "If your PR is not yet ready for review, and you don't want to notify " "anyone, you have the option to create a draft pull request:" msgstr "PR이 아직 검토할 준비가 되지 않았고 다른 사람에게 알리고 싶지 않은 경우 pull request 초안을 만드는 옵션이 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:230 msgid "**Making new changes**" msgstr "**new changes 만들기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:229 msgid "" "Once the PR has been opened (as draft or not), you can still push new " "commits to it the same way we did before, by making changes to the branch" " associated with the PR." msgstr "PR이 초안으로 열렸든 아니든, PR과 연결된 브랜치를 변경하여 이전과 같은 방식으로 새 커밋을 푸시할 수 있습니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:231 +#: ../../source/contributor-tutorial-contribute-on-github.rst:253 msgid "**Review the PR**" msgstr "**PR 검토하기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:212 +#: ../../source/contributor-tutorial-contribute-on-github.rst:233 msgid "" "Once the PR has been opened or once the draft PR has been marked as " "ready, a review from code owners will be automatically requested:" msgstr "PR이 열리거나 초안 PR이 준비됨으로 표시되면 코드 소유자의 검토가 자동으로 요청됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:216 +#: ../../source/contributor-tutorial-contribute-on-github.rst:238 msgid "" "Code owners will then look into the code, ask questions, request changes " "or validate the PR." msgstr "그러면 코드 소유자는 코드를 살펴보고, 질문하고, 변경을 요청하거나 PR의 유효성을 검사합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:218 +#: ../../source/contributor-tutorial-contribute-on-github.rst:241 msgid "Merging will be blocked if there are ongoing requested changes." msgstr "진행 중인 변경 요청이 있는 경우 병합이 차단됩니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:222 +#: ../../source/contributor-tutorial-contribute-on-github.rst:245 msgid "" "To resolve them, just push the necessary changes to the branch associated" " with the PR:" msgstr "이를 해결하려면 PR과 연결된 브랜치에 필요한 변경 사항을 푸시하면 됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:226 +#: ../../source/contributor-tutorial-contribute-on-github.rst:250 msgid "And resolve the conversation:" msgstr "그리고 소통을 통해 해결하세요:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:230 +#: ../../source/contributor-tutorial-contribute-on-github.rst:254 msgid "" "Once all the conversations have been resolved, you can re-request a " "review." msgstr "모든 대화가 해결되면 검토를 다시 요청할 수 있습니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:251 +#: ../../source/contributor-tutorial-contribute-on-github.rst:274 msgid "**Once the PR is merged**" msgstr "**PR이 병합되면**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:234 +#: ../../source/contributor-tutorial-contribute-on-github.rst:256 msgid "" "If all the automatic tests have passed and reviewers have no more changes" " to request, they can approve the PR and merge it." msgstr "모든 자동 테스트가 통과되고 검토자가 더 이상 요청할 변경 사항이 없는 경우 PR을 승인하고 병합할 수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:238 +#: ../../source/contributor-tutorial-contribute-on-github.rst:261 msgid "" "Once it is merged, you can delete the branch on GitHub (a button should " "appear to do so) and also delete it locally by doing:" msgstr "병합이 완료되면 GitHub에서 브랜치를 삭제할 수 있으며(삭제 버튼이 표시되어야 함), 로컬에서도 삭제할 수 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:245 +#: ../../source/contributor-tutorial-contribute-on-github.rst:269 msgid "Then you should update your forked repository by doing:" msgstr "그런 다음 다음을 수행하여 포크된 레포지토리를 업데이트해야 합니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:254 +#: ../../source/contributor-tutorial-contribute-on-github.rst:277 msgid "Example of first contribution" msgstr "첫 번째 기여의 예" -#: ../../source/contributor-tutorial-contribute-on-github.rst:257 +#: ../../source/contributor-tutorial-contribute-on-github.rst:280 msgid "Problem" msgstr "문제" -#: ../../source/contributor-tutorial-contribute-on-github.rst:259 +#: ../../source/contributor-tutorial-contribute-on-github.rst:282 msgid "" "For our documentation, we've started to use the `Diàtaxis framework " "`_." msgstr "저희 문서에는 'Diàtaxis 프레임워크 `_'를 사용하기 시작했습니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:261 +#: ../../source/contributor-tutorial-contribute-on-github.rst:285 msgid "" "Our \"How to\" guides should have titles that continue the sentence \"How" " to …\", for example, \"How to upgrade to Flower 1.0\"." @@ -1912,19 +1920,19 @@ msgstr "" "'How to' 가이드의 제목은 \"How to …\"라는 문장을 이어가는 제목이어야 합니다(예: \"How to upgrade " "to Flower 1.0\")." -#: ../../source/contributor-tutorial-contribute-on-github.rst:263 +#: ../../source/contributor-tutorial-contribute-on-github.rst:288 msgid "" "Most of our guides do not follow this new format yet, and changing their " "title is (unfortunately) more involved than one might think." msgstr "대부분의 가이드는 아직 이 새로운 형식을 따르지 않으며, 안타깝게도 제목을 변경하는 작업은 생각보다 복잡합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:265 +#: ../../source/contributor-tutorial-contribute-on-github.rst:291 msgid "" "This issue is about changing the title of a doc from present continuous " "to present simple." msgstr "이번 이슈는 문서 제목을 현재 연속형에서 현재 단순형으로 변경하는 것에 관한 것입니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:267 +#: ../../source/contributor-tutorial-contribute-on-github.rst:294 msgid "" "Let's take the example of \"Saving Progress\" which we changed to \"Save " "Progress\". Does this pass our check?" @@ -1932,19 +1940,19 @@ msgstr "" "\"How to saving progress\"을 \"How to save progress\"으로 변경한 예를 들어 보겠습니다. " "이것이 우리의 점검을 통과했나요?" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:269 +#: ../../source/contributor-tutorial-contribute-on-github.rst:297 msgid "Before: \"How to saving progress\" ❌" msgstr "Before: \"How to saving progress\" ❌" -#: ../../source/contributor-tutorial-contribute-on-github.rst:271 +#: ../../source/contributor-tutorial-contribute-on-github.rst:299 msgid "After: \"How to save progress\" ✅" msgstr "After: \"How to save progress\" ✅" -#: ../../source/contributor-tutorial-contribute-on-github.rst:274 +#: ../../source/contributor-tutorial-contribute-on-github.rst:302 msgid "Solution" msgstr "해결법" -#: ../../source/contributor-tutorial-contribute-on-github.rst:276 +#: ../../source/contributor-tutorial-contribute-on-github.rst:304 msgid "" "This is a tiny change, but it'll allow us to test your end-to-end setup. " "After cloning and setting up the Flower repo, here's what you should do:" @@ -1952,17 +1960,17 @@ msgstr "" "이것은 사소한 변경이지만 end-to-end 설정을 테스트할 수 있습니다. Flower 레포지토리를 복제하고 설정한 후에는 다음과 " "같이 하세요:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:278 +#: ../../source/contributor-tutorial-contribute-on-github.rst:307 msgid "Find the source file in ``doc/source``" msgstr "``doc/source``에서 소스 파일을 찾습니다" -#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#: ../../source/contributor-tutorial-contribute-on-github.rst:308 msgid "" "Make the change in the ``.rst`` file (beware, the dashes under the title " "should be the same length as the title itself)" msgstr "``.rst`` 파일에서 변경합니다(제목 아래의 대시는 제목 자체의 길이와 같아야 합니다)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:280 +#: ../../source/contributor-tutorial-contribute-on-github.rst:310 msgid "" "Build the docs and `check the result `_" @@ -1970,11 +1978,11 @@ msgstr "" "문서를 빌드하고 '결과 확인 `_'합니다" -#: ../../source/contributor-tutorial-contribute-on-github.rst:283 +#: ../../source/contributor-tutorial-contribute-on-github.rst:314 msgid "Rename file" msgstr "파일 이름 바꾸기" -#: 
../../source/contributor-tutorial-contribute-on-github.rst:285 +#: ../../source/contributor-tutorial-contribute-on-github.rst:316 msgid "" "You might have noticed that the file name still reflects the old wording." " If we just change the file, then we break all existing links to it - it " @@ -1984,19 +1992,19 @@ msgstr "" "파일 이름에 여전히 이전 문구가 반영되어 있는 것을 보셨을 것입니다. 파일만 변경하면 파일에 대한 기존 링크가 모두 끊어지는데, " "링크를 끊으면 검색 엔진 순위에 영향을 줄 수 있으므로 이를 방지하는 것이 **매우 중요**합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:288 +#: ../../source/contributor-tutorial-contribute-on-github.rst:320 msgid "Here's how to change the file name:" msgstr "파일 이름을 변경하는 방법은 다음과 같습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +#: ../../source/contributor-tutorial-contribute-on-github.rst:322 msgid "Change the file name to ``save-progress.rst``" msgstr "파일 이름을 ``save-progress.rst``로 변경합니다" -#: ../../source/contributor-tutorial-contribute-on-github.rst:291 +#: ../../source/contributor-tutorial-contribute-on-github.rst:323 msgid "Add a redirect rule to ``doc/source/conf.py``" msgstr "'doc/source/conf.py'에 리디렉션 규칙을 추가합니다" -#: ../../source/contributor-tutorial-contribute-on-github.rst:293 +#: ../../source/contributor-tutorial-contribute-on-github.rst:325 msgid "" "This will cause a redirect from ``saving-progress.html`` to ``save-" "progress.html``, old links will continue to work." @@ -2004,11 +2012,11 @@ msgstr "" "이렇게 하면 ``saving-progress.html``에서 ``save-progress.html``로 리디렉션되며, 이전 링크는 " "계속 작동합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:296 +#: ../../source/contributor-tutorial-contribute-on-github.rst:329 msgid "Apply changes in the index file" msgstr "인덱스 파일에 변경 사항 적용" -#: ../../source/contributor-tutorial-contribute-on-github.rst:298 +#: ../../source/contributor-tutorial-contribute-on-github.rst:331 msgid "" "For the lateral navigation bar to work properly, it is very important to " "update the ``index.rst`` file as well. 
This is where we define the whole " @@ -2017,39 +2025,39 @@ msgstr "" "횡방향 내비게이션 바가 제대로 작동하려면 ``index.rst`` 파일도 업데이트하는 것이 매우 중요합니다. 이 파일은 탐색 모음의" " 전체 배열을 정의하는 곳입니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:301 +#: ../../source/contributor-tutorial-contribute-on-github.rst:335 msgid "Find and modify the file name in ``index.rst``" msgstr "``index.rst``에서 파일 이름을 찾아 수정합니다" -#: ../../source/contributor-tutorial-contribute-on-github.rst:304 +#: ../../source/contributor-tutorial-contribute-on-github.rst:338 msgid "Open PR" msgstr "PR 열기" -#: ../../source/contributor-tutorial-contribute-on-github.rst:306 +#: ../../source/contributor-tutorial-contribute-on-github.rst:340 msgid "" "Commit the changes (commit messages are always imperative: \"Do " "something\", in this case \"Change …\")" msgstr "변경 사항을 커밋합니다(커밋 메시지는 항상 필수 메시지입니다:\"Do something\"(이 경우 는 \"Change …\" )" -#: ../../source/contributor-tutorial-contribute-on-github.rst:307 +#: ../../source/contributor-tutorial-contribute-on-github.rst:342 msgid "Push the changes to your fork" msgstr "변경 사항을 포크에 푸시합니다" -#: ../../source/contributor-tutorial-contribute-on-github.rst:308 +#: ../../source/contributor-tutorial-contribute-on-github.rst:343 msgid "" "Open a PR (as shown above) with title ``docs(framework) Update how-to " "guide title``" msgstr "``docs(framework) Update how-to guide title`` 제목으로 PR(위와 같이)을 엽니다" -#: ../../source/contributor-tutorial-contribute-on-github.rst:309 +#: ../../source/contributor-tutorial-contribute-on-github.rst:344 msgid "Wait for it to be approved!" msgstr "승인될 때까지 기다리세요!" -#: ../../source/contributor-tutorial-contribute-on-github.rst:310 +#: ../../source/contributor-tutorial-contribute-on-github.rst:345 msgid "Congrats! 🥳 You're now officially a Flower contributor!" msgstr "축하합니다! 이제 공식적으로 Flower 기여자가 되셨습니다!" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:314 +#: ../../source/contributor-tutorial-contribute-on-github.rst:348 #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:573 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1012 #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:811 @@ -2058,41 +2066,42 @@ msgstr "축하합니다! 이제 공식적으로 Flower 기여자가 되셨습니 msgid "Next steps" msgstr "다음 단계" -#: ../../source/contributor-tutorial-contribute-on-github.rst:316 +#: ../../source/contributor-tutorial-contribute-on-github.rst:350 msgid "" "Once you have made your first PR, and want to contribute more, be sure to" " check out the following :" msgstr "첫 번째 PR을 작성하고 더 많은 기여를 하고 싶다면 다음을 확인하세요:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:318 +#: ../../source/contributor-tutorial-contribute-on-github.rst:353 +#, fuzzy msgid "" ":doc:`Good first contributions `, where you should particularly look into the " -":code:`baselines` contributions." +"``baselines`` contributions." msgstr "" ":doc:`훌륭한 첫 번째 기여 `, 특히 " ":code:`baselines` 기여를 살펴봐야 합니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:322 +#: ../../source/contributor-tutorial-contribute-on-github.rst:357 #: ../../source/fed/0000-20200102-fed-template.md:60 msgid "Appendix" msgstr "부록" -#: ../../source/contributor-tutorial-contribute-on-github.rst:327 +#: ../../source/contributor-tutorial-contribute-on-github.rst:362 msgid "PR title format" msgstr "PR 제목 형식" -#: ../../source/contributor-tutorial-contribute-on-github.rst:329 +#: ../../source/contributor-tutorial-contribute-on-github.rst:364 msgid "We enforce the following PR title format:" msgstr "다음과 같은 PR 제목 형식을 적용합니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:335 +#: ../../source/contributor-tutorial-contribute-on-github.rst:370 msgid "" "(or ``(:skip) `` to ignore the PR in the " "changelog)" msgstr "(또는 ``(:skip) ``를 사용하면 변경 로그에서 PR을 무시합니다.)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:337 +#: ../../source/contributor-tutorial-contribute-on-github.rst:372 msgid "" "Where ```` needs to be in ``{ci, fix, feat, docs, refactor, " "break}``, ```` should be in ``{framework, baselines, datasets, " @@ -2105,49 +2114,49 @@ msgstr "" "':skip' 플래그를 사용해야 하는 여러 프로젝트를 수정하는 경우}``로 입력해야 하며, ````는 대문자로 " "시작해야 합니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:341 +#: ../../source/contributor-tutorial-contribute-on-github.rst:377 msgid "Valid examples:" msgstr "유효한 예시입니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:343 +#: ../../source/contributor-tutorial-contribute-on-github.rst:379 msgid "``feat(framework) Add flwr build CLI command``" msgstr "``feat(framework) Add flwr build CLI command``" -#: ../../source/contributor-tutorial-contribute-on-github.rst:344 +#: ../../source/contributor-tutorial-contribute-on-github.rst:380 msgid "``refactor(examples:skip) Improve quickstart-pytorch logging``" msgstr "``refactor(examples:skip) Improve quickstart-pytorch logging``" -#: ../../source/contributor-tutorial-contribute-on-github.rst:345 +#: ../../source/contributor-tutorial-contribute-on-github.rst:381 msgid "``ci(*:skip) Enforce PR title format``" msgstr "``ci(*:skip) Enforce PR title format``" -#: ../../source/contributor-tutorial-contribute-on-github.rst:347 +#: ../../source/contributor-tutorial-contribute-on-github.rst:383 msgid "Invalid examples:" msgstr "잘못된 예시입니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:349 +#: ../../source/contributor-tutorial-contribute-on-github.rst:385 msgid "``feat(framework): Add flwr build CLI command`` (extra ``:``)" msgstr "``feat(framework): Add flwr build CLI command`` ( ``:``제외)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:350 +#: ../../source/contributor-tutorial-contribute-on-github.rst:386 msgid "" "``feat(*) Add flwr build CLI command`` (missing ``skip`` flag along with " "``*``)" msgstr "``feat(*) Add flwr build CLI command`` (``skip`` flag와 함께 ``*``누락)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:351 +#: ../../source/contributor-tutorial-contribute-on-github.rst:387 msgid "``feat(skip) Add flwr build CLI command`` (missing ````)" msgstr "``feat(skip) Add flwr build CLI command`` (````누락)" -#: 
../../source/contributor-tutorial-contribute-on-github.rst:352 +#: ../../source/contributor-tutorial-contribute-on-github.rst:388 msgid "``feat(framework) add flwr build CLI command`` (non capitalised verb)" msgstr "``feat(framework) add flwr build CLI command`` (대문자로 표기되지 않은 동사)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:353 +#: ../../source/contributor-tutorial-contribute-on-github.rst:389 msgid "``feat(framework) Add flwr build CLI command.`` (dot at the end)" msgstr "``feat(framework) Add flwr build CLI command.`` (끝에 마침표)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:354 +#: ../../source/contributor-tutorial-contribute-on-github.rst:390 msgid "``Add flwr build CLI command.`` (missing ``()``)" msgstr "``Add flwr build CLI command.`` ( ``()``누락)" @@ -2157,8 +2166,9 @@ msgstr "기여자로 시작하기" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 #: ../../source/docker/run-as-subprocess.rst:11 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:15 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:12 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:16 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:13 #: ../../source/docker/tutorial-quickstart-docker.rst:11 msgid "Prerequisites" msgstr "전제 조건" @@ -2180,8 +2190,9 @@ msgid "(Optional) `pyenv-virtualenv ` msgstr "(선택 사항) `pyenv-virtualenv `_" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:12 +#, fuzzy msgid "" -"Flower uses :code:`pyproject.toml` to manage dependencies and configure " +"Flower uses ``pyproject.toml`` to manage dependencies and configure " "development tools (the ones which support it). Poetry is a build tool " "which supports `PEP 517 `_." msgstr "" @@ -2189,11 +2200,11 @@ msgstr "" ":code:`pyproject.toml`을 사용합니다. Poetry는 `PEP 517 " "`_을 지원하는 빌드 도구입니다." 
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:18 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:17 msgid "Developer Machine Setup" msgstr "개발자 머신 설정" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:21 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:20 #, fuzzy msgid "Preliminaries" msgstr "사전 준비" @@ -2212,105 +2223,112 @@ msgid "" "installation actions to add `brew` to your PATH." msgstr "`homebrew `_를 설치합니다. 설치 후 `brew`를 PATH에 추가하는 작업을 잊지 마세요." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:28 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:29 +#, fuzzy msgid "" "Install `xz` (to install different Python versions) and `pandoc` to build" -" the docs::" +" the docs:" msgstr "xz`(다른 Python 버전을 설치하려면)와 `pandoc`을 설치하여 문서를 빌드합니다::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:34 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:36 msgid "For Ubuntu" msgstr "Ubuntu의 경우" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:35 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:38 +#, fuzzy msgid "" "Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all " -"necessary packages::" +"necessary packages:" msgstr "시스템(우분투 22.04 이상)이 최신 상태이고 필요한 패키지가 모두 설치되어 있는지 확인하세요:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:44 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:47 msgid "Create Flower Dev Environment" msgstr "Flower 개발 환경 만들기" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:46 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:49 +#, fuzzy msgid "" -"1. Clone the `Flower repository `_ from " -"GitHub::" +"Clone the `Flower repository `_ from " +"GitHub:" msgstr "1. 
GitHub: 에서 ``Flower 레포지토리 `_를 복제합니다::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:52 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:56 +#, fuzzy msgid "" "Let's create the Python environment for all-things Flower. If you wish to" -" use :code:`pyenv`, we provide two convenience scripts that you can use. " -"If you prefer using something else than :code:`pyenv`, create a new " -"environment, activate and skip to the last point where all packages are " -"installed." +" use ``pyenv``, we provide two convenience scripts that you can use. If " +"you prefer using something else than ``pyenv``, create a new environment," +" activate and skip to the last point where all packages are installed." msgstr "" "Flower의 모든 것을 위한 파이썬 환경을 만들어 보겠습니다.:code:`pyenv`를 사용하고자 하는 경우 사용할 수 있는 두 " "가지 편의 스크립트를 제공합니다.:code:`pyenv`가 아닌 다른 것을 사용하려면 새 환경을 생성하고 활성화한 후 모든 패키지가" " 설치된 마지막 지점으로 건너뛰세요." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:54 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:61 +#, fuzzy msgid "" -"If you don't have :code:`pyenv` installed, the following script that will" -" install it, set it up, and create the virtual environment (with " -":code:`Python 3.9.20` by default)::" +"If you don't have ``pyenv`` installed, the following script that will " +"install it, set it up, and create the virtual environment (with ``Python " +"3.9.20`` by default):" msgstr "" ":code:`pyenv`가 설치되어 있지 않은 경우 다음 스크립트를 사용하여 설치, 설정 및 가상 환경을 생성합니다(기본적으로 " ":code:`Python 3.9.20` 사용):" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:58 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:68 +#, fuzzy msgid "" -"If you already have :code:`pyenv` installed (along with the :code:`pyenv-" -"virtualenv` plugin), you can use the following convenience script (with " -":code:`Python 3.9.20` by default)::" +"If you already have ``pyenv`` installed (along with the 
``pyenv-" +"virtualenv`` plugin), you can use the following convenience script (with " +"``Python 3.9.20`` by default):" msgstr "" ":code:`pyenv`가 이미 설치되어 있는 경우( :code:`pyenv-virtualenv` 플러그인과 함께) 다음과 같은 " "편의 스크립트를 사용할 수 있습니다(기본적으로 코드:`Python 3.9.20` 사용):" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:62 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:75 +#, fuzzy msgid "" -"3. Install the Flower package in development mode (think :code:`pip " -"install -e`) along with all necessary dependencies::" +"3. Install the Flower package in development mode (think ``pip install " +"-e``) along with all necessary dependencies:" msgstr "" "3. 필요한 모든 dependencies와 함께 개발 모드에서 Flower 패키지를 설치합니다(예:code:`pip install " "-e`)::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:83 msgid "Convenience Scripts" msgstr "편의 스크립트" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:71 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 +#, fuzzy msgid "" "The Flower repository contains a number of convenience scripts to make " -"recurring development tasks easier and less error-prone. See the " -":code:`/dev` subdirectory for a full list. The following scripts are " -"amongst the most important ones:" +"recurring development tasks easier and less error-prone. See the ``/dev``" +" subdirectory for a full list. The following scripts are amongst the most" +" important ones:" msgstr "" "Flower 레포지토리에는 반복적인 개발 작업을 더 쉽고 오류를 줄이기 위한 여러 가지 편의 스크립트가 포함되어 있습니다. 전체 " "목록은 :code:`/dev` 하위 디렉터리를 참조하세요. 
다음 스크립트는 가장 중요한 스크립트 중 하나입니다:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:90 msgid "Create/Delete Virtual Environment" msgstr "가상 환경 생성/삭제" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:98 msgid "Compile ProtoBuf Definitions" msgstr "ProtoBuf 정의 컴파일" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:105 msgid "Auto-Format Code" msgstr "자동 포맷 코드" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:99 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:112 msgid "Run Linters and Tests" msgstr "린터 및 테스트 실행" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:106 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119 msgid "Add a pre-commit hook" msgstr "사전 커밋 훅 추가" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:121 msgid "" "Developers may integrate a pre-commit hook into their workflow utilizing " "the `pre-commit `_ library. The pre-" @@ -2321,90 +2339,95 @@ msgstr "" " 워크플로에 통합할 수 있습니다. 사전 커밋 훅은 두 가지 기본 작업을 실행하도록 구성됩니다:``./dev/format.sh`` 및" " ``./dev/test.sh`` 스크립트." 
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:110 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:125 msgid "There are multiple ways developers can use this:" msgstr "개발자가 이것을 사용할 수 있는 여러가지 방법이 있습니다:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:112 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:127 msgid "Install the pre-commit hook to your local git directory by simply running:" msgstr "간단하게 실행하여 로컬 git 디렉터리에 사전 커밋 훅을 설치하세요:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:118 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:133 msgid "" "Each ``git commit`` will trigger the execution of formatting and " "linting/test scripts." msgstr "각 ``git 커밋``은 포맷 및 린팅/테스트 스크립트의 실행을 트리거합니다." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:135 +#, fuzzy msgid "" "If in a hurry, bypass the hook using ``--no-verify`` with the ``git " -"commit`` command. ::" +"commit`` command." msgstr "급한 경우 ``git commit`` 명령과 함께 `--no-verify``를 사용하여 훅을 넘기세요:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142 msgid "" "For developers who prefer not to install the hook permanently, it is " "possible to execute a one-time check prior to committing changes by using" " the following command:" msgstr "훅을 영구적으로 설치하지 않으려는 개발자의 경우 다음 명령을 사용하여 변경 사항을 커밋하기 전에 일회성 검사를 실행할 수 있습니다:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:149 msgid "" "This executes the formatting and linting checks/tests on all the files " "without modifying the default behavior of ``git commit``." msgstr "이렇게 하면 ``git commit``의 기본 동작을 수정하지 않고 모든 파일에 대해 포맷 및 린팅 검사/테스트를 실행합니다." 
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:133 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:153 msgid "Run Github Actions (CI) locally" msgstr "로컬에서 Github Action(CI) 실행하기" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:135 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:155 +#, fuzzy msgid "" "Developers could run the full set of Github Actions workflows under their" " local environment by using `Act `_. " "Please refer to the installation instructions under the linked repository" -" and run the next command under Flower main cloned repository folder::" +" and run the next command under Flower main cloned repository folder:" msgstr "" "개발자는 `Act `_를 사용하여 로컬 환경에서 전체 Github " "Actions 워크플로우 세트를 실행할 수 있습니다. 링크된 레포지토리 아래의 설치 지침을 참조하여 Flower 메인 클론 " "레포지토리 폴더 아래에서 다음 명령을 실행하세요::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:164 msgid "" "The Flower default workflow would run by setting up the required Docker " "machines underneath." msgstr "Flower 기본 워크플로우는 아래에 필요한 Docker 머신을 설정하여 실행합니다." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:147 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:168 msgid "Build Release" msgstr "릴리즈 빌드" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:149 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:170 +#, fuzzy msgid "" "Flower uses Poetry to build releases. The necessary command is wrapped in" -" a simple script::" +" a simple script:" msgstr "Flower는 Poetry를 사용하여 릴리즈를 빌드합니다. 
필요한 명령은 간단한 스크립트로 래핑됩니다::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:154 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:177 +#, fuzzy msgid "" -"The resulting :code:`.whl` and :code:`.tar.gz` releases will be stored in" -" the :code:`/dist` subdirectory." +"The resulting ``.whl`` and ``.tar.gz`` releases will be stored in the " +"``/dist`` subdirectory." msgstr "결과물인 :code:`.whl` 및 :code:`.tar.gz` 릴리즈는 :code:`/dist` 하위 디렉터리에 저장됩니다." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:159 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:181 msgid "Build Documentation" msgstr "문서 빌드" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:161 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:183 +#, fuzzy msgid "" "Flower's documentation uses `Sphinx `_. " "There's no convenience script to re-build the documentation yet, but it's" -" pretty easy::" +" pretty easy:" msgstr "" "Flower의 문서는 `Sphinx `_를 사용합니다. 아직 문서를 다시 작성할" " 수 있는 편리한 스크립트는 없지만 다음과 같이 쉽게 작성할 수 있습니다:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:191 msgid "This will generate HTML documentation in ``doc/build/html``." msgstr "그러면 ``doc/build/html``에 HTML 문서가 생성됩니다." @@ -2460,7 +2483,7 @@ msgstr "" "사용자 ID를 변경하려면 ``sudo chown -R 49999:49999 certificates/*``를 실행하면 됩니다." 
#: ../../source/docker/enable-tls.rst:23 -#: ../../source/docker/persist-superlink-state.rst:14 +#: ../../source/docker/persist-superlink-state.rst:15 msgid "" "If you later want to delete the directory, you can change the user ID " "back to the current user ID by running ``sudo chown -R $USER:$(id -gn) " @@ -2483,22 +2506,22 @@ msgstr "" msgid "Understanding the command" msgstr "" -#: ../../source/docker/enable-tls.rst:44 ../../source/docker/enable-tls.rst:91 +#: ../../source/docker/enable-tls.rst:45 ../../source/docker/enable-tls.rst:92 #: ../../source/docker/enable-tls.rst:125 #: ../../source/docker/tutorial-quickstart-docker.rst:66 #: ../../source/docker/tutorial-quickstart-docker.rst:103 -#: ../../source/docker/tutorial-quickstart-docker.rst:213 -#: ../../source/docker/tutorial-quickstart-docker.rst:300 +#: ../../source/docker/tutorial-quickstart-docker.rst:217 +#: ../../source/docker/tutorial-quickstart-docker.rst:305 #, fuzzy msgid "``docker run``: This tells Docker to run a container from an image." msgstr "``docker run``: 새 Docker 컨테이너를 실행하는 명령입니다." -#: ../../source/docker/enable-tls.rst:45 ../../source/docker/enable-tls.rst:92 +#: ../../source/docker/enable-tls.rst:46 ../../source/docker/enable-tls.rst:93 #: ../../source/docker/enable-tls.rst:126 #: ../../source/docker/tutorial-quickstart-docker.rst:67 #: ../../source/docker/tutorial-quickstart-docker.rst:104 -#: ../../source/docker/tutorial-quickstart-docker.rst:214 -#: ../../source/docker/tutorial-quickstart-docker.rst:301 +#: ../../source/docker/tutorial-quickstart-docker.rst:218 +#: ../../source/docker/tutorial-quickstart-docker.rst:306 msgid "``--rm``: Remove the container once it is stopped or the command exits." msgstr "" @@ -2603,12 +2626,12 @@ msgstr "" msgid "the network." 
msgstr "" -#: ../../source/docker/enable-tls.rst:71 +#: ../../source/docker/enable-tls.rst:72 #, fuzzy msgid "SuperNode" msgstr "run\\_supernode" -#: ../../source/docker/enable-tls.rst:73 +#: ../../source/docker/enable-tls.rst:74 #, fuzzy msgid "" "Assuming that the ``ca.crt`` certificate already exists locally, we can " @@ -2619,7 +2642,7 @@ msgstr "" "디렉터리에 마운트할 수 있습니다. 이렇게 하면 SuperNode가 컨테이너 내의 인증서에 액세스할 수 있습니다. 컨테이너를 시작할 " "때 ``--root-certificates`` 플래그를 사용하세요." -#: ../../source/docker/enable-tls.rst:78 +#: ../../source/docker/enable-tls.rst:79 msgid "" "If you're generating self-signed certificates and the ``ca.crt`` " "certificate doesn't exist on the SuperNode, you can copy it over after " @@ -2734,16 +2757,16 @@ msgstr "" msgid "Getting Started" msgstr "시작하기" -#: ../../source/docker/index.rst:20 +#: ../../source/docker/index.rst:19 msgid "Running in Production" msgstr "" -#: ../../source/docker/index.rst:29 +#: ../../source/docker/index.rst:28 #, fuzzy msgid "Advanced Options" msgstr "고급 Docker 옵션" -#: ../../source/docker/index.rst:41 +#: ../../source/docker/index.rst:40 #, fuzzy msgid "Run Flower using Docker Compose" msgstr "Docker를 사용하여 Flower 실행" @@ -2769,7 +2792,7 @@ msgid "" " on your host system and a name for the database file." msgstr "" -#: ../../source/docker/persist-superlink-state.rst:10 +#: ../../source/docker/persist-superlink-state.rst:11 msgid "" "By default, the SuperLink container runs with a non-root user called " "``app`` with the user ID ``49999``. It is recommended to create a new " @@ -2777,7 +2800,7 @@ msgid "" "the mounted directory has the proper permissions." msgstr "" -#: ../../source/docker/persist-superlink-state.rst:20 +#: ../../source/docker/persist-superlink-state.rst:21 #, fuzzy msgid "" "In the example below, we create a new directory called ``state``, change " @@ -2790,7 +2813,7 @@ msgstr "" "``state`` 디렉터리를 컨테이너의 ``/app/state`` 디렉터리에 마운트하도록 지시합니다. 또한 " "``--database`` 플래그를 사용하여 데이터베이스 파일의 이름을 지정합니다." 
-#: ../../source/docker/persist-superlink-state.rst:35 +#: ../../source/docker/persist-superlink-state.rst:36 #, fuzzy msgid "" "As soon as the SuperLink starts, the file ``state.db`` is created in the " @@ -2823,18 +2846,18 @@ msgstr "" "태그 뒤에 있는 이미지가 업데이트될 수 있습니다. 이러한 업데이트에는 일반적으로 Flower의 기능을 변경해서는 안 되는 시스템 " "의존성에 대한 보안 업데이트가 포함됩니다. 그러나 항상 동일한 이미지를 사용하려면 태그 대신 이미지의 해시를 지정할 수 있습니다." -#: ../../source/docker/pin-version.rst:13 +#: ../../source/docker/pin-version.rst:14 #, fuzzy msgid "" "The following command returns the current image digest referenced by the " ":substitution-code:`superlink:|stable_flwr_version|` tag:" msgstr "다음 명령은 ``superlink:1.8.0`` 태그가 참조하는 현재 이미지 해시를 반환합니다:" -#: ../../source/docker/pin-version.rst:22 +#: ../../source/docker/pin-version.rst:23 msgid "This will output" msgstr "" -#: ../../source/docker/pin-version.rst:29 +#: ../../source/docker/pin-version.rst:30 #, fuzzy msgid "Next, we can pin the digest when running a new SuperLink container:" msgstr "다음으로, 새 SuperLink 컨테이너를 실행할 때 해시를 고정할 수 있습니다:" @@ -2891,7 +2914,7 @@ msgstr "" "Docker 이미지 빌드 과정에서 루트 사용자로 전환하여 누락된 시스템 의존성을 설치하려면 Dockerfile 내에서 ``USER " "root`` 지시어를 사용할 수 있습니다." -#: ../../source/docker/run-as-root-user.rst:29 +#: ../../source/docker/run-as-root-user.rst:30 #, fuzzy msgid "SuperNode Dockerfile" msgstr "SuperNode Dockerfile 만들기" @@ -2918,12 +2941,12 @@ msgid "" "done by extending the SuperNode image:" msgstr "" -#: ../../source/docker/run-as-subprocess.rst:16 +#: ../../source/docker/run-as-subprocess.rst:17 #, fuzzy msgid "Dockerfile.supernode" msgstr "Flower SuperNode" -#: ../../source/docker/run-as-subprocess.rst:30 +#: ../../source/docker/run-as-subprocess.rst:31 #, fuzzy msgid "" "Next, build the SuperNode Docker image by running the following command " @@ -2957,82 +2980,83 @@ msgid "" " Engine via Docker Compose." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:12 msgid "" "Some quickstart examples may have limitations or requirements that " "prevent them from running on every environment. For more information, " -"please see `Limitations`_." +"please see Limitations_." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:17 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:14 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:15 #: ../../source/docker/tutorial-quickstart-docker.rst:13 #, fuzzy msgid "Before you start, make sure that:" msgstr "시작하기 전에 Docker daemon이 실행 중인지 확인하세요:" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:19 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:16 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 #: ../../source/docker/tutorial-quickstart-docker.rst:15 msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 #: ../../source/docker/tutorial-quickstart-docker.rst:16 #, fuzzy msgid "The Docker daemon is running." msgstr "Docker 데몬이 실행 중인지 확인하십시오." 
-#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:19 msgid "Docker Compose is `installed `_." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:24 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:25 #, fuzzy msgid "Run the Quickstart Example" msgstr "예시 요청" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:26 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:27 msgid "" "Clone the quickstart example you like to run. For example, ``quickstart-" "pytorch``:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:34 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:35 msgid "" "Download the `compose.yml " "`_" " file into the example directory:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:41 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:44 #, fuzzy msgid "Build and start the services using the following command:" msgstr "다음 명령을 실행하여 가상 환경을 활성화합니다:" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:47 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:50 #, fuzzy msgid "" "Append the following lines to the end of the ``pyproject.toml`` file and " "save it:" msgstr "``pyproject.toml``에 다음 버전 제약 조건을 설정했는지 확인하세요:" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:49 -#: ../../source/docker/tutorial-quickstart-docker.rst:319 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:52 +#: ../../source/docker/tutorial-quickstart-docker.rst:324 #, fuzzy msgid "pyproject.toml" msgstr "또는 ``pyproject.toml``:" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:58 +#: 
../../source/docker/run-quickstart-examples-docker-compose.rst:61 msgid "" "You can customize the string that follows ``tool.flwr.federations.`` to " "fit your needs. However, please note that the string cannot contain a dot" " (``.``)." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:64 msgid "" "In this example, ``local-deployment`` has been used. Just remember to " "replace ``local-deployment`` with your chosen name in both the " @@ -3040,57 +3064,57 @@ msgid "" "command." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:65 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:68 #, fuzzy msgid "Run the example:" msgstr "전체 코드 예제" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:71 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:74 msgid "Follow the logs of the SuperExec service:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:77 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 msgid "" "That is all it takes! You can monitor the progress of the run through the" " logs of the SuperExec." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:84 msgid "Run a Different Quickstart Example" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:82 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:86 msgid "" "To run a different quickstart example, such as ``quickstart-tensorflow``," " first, shut down the Docker Compose services of the current example:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:89 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:93 msgid "After that, you can repeat the steps above." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:92 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:98 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:96 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 #, fuzzy msgid "Limitations" msgstr "동기" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:97 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 #, fuzzy msgid "Quickstart Example" msgstr "빠른 시작" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:99 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 #, fuzzy msgid "quickstart-fastai" msgstr "빠른 시작 튜토리얼" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:100 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:123 #: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 #: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 #: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 @@ -3098,80 +3122,80 @@ msgstr "빠른 시작 튜토리얼" msgid "None" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 #, 
fuzzy msgid "quickstart-huggingface" msgstr "빠른 시작 튜토리얼" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 #, fuzzy msgid "quickstart-jax" msgstr "빠른 시작" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:125 msgid "" "The example has not yet been updated to work with the latest ``flwr`` " "version." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 #, fuzzy msgid "quickstart-mlcube" msgstr "빠른 시작" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 #, fuzzy msgid "quickstart-mlx" msgstr "빠른 시작" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 msgid "" "`Requires to run on macOS with Apple Silicon `_." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 #, fuzzy msgid "quickstart-monai" msgstr "빠른 시작" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 #, fuzzy msgid "quickstart-pandas" msgstr "빠른 시작 튜토리얼" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:113 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 msgid "quickstart-pytorch-lightning" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 msgid "" "Requires an older pip version that is not supported by the Flower Docker " "images." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 #, fuzzy msgid "quickstart-pytorch" msgstr "빠른 시작 튜토리얼" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 msgid "quickstart-sklearn-tabular" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:124 #, fuzzy msgid "quickstart-tabnet" msgstr "빠른 시작 튜토리얼" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:126 #, fuzzy msgid "quickstart-tensorflow" msgstr "빠른 시작 튜토리얼" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:127 msgid "Only runs on AMD64." msgstr "" @@ -3188,6 +3212,207 @@ msgid "" "environment variables for a container." msgstr "Docker 컨테이너 내에서 변수를 설정하려면 ``-e =`` 플래그를 사용하면 됩니다." 
+#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:2 +#, fuzzy +msgid "Deploy Flower on Multiple Machines with Docker Compose" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:4 +msgid "" +"This guide will help you set up a Flower project on multiple machines " +"using Docker Compose." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:7 +msgid "" +"You will learn how to run the Flower client and server components on two " +"separate machines, with Flower configured to use TLS encryption and " +"persist SuperLink state across restarts. A server consists of a SuperLink" +" and ``SuperExec``. For more details about the Flower architecture, refer" +" to the :doc:`../explanation-flower-architecture` explainer page." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:13 +msgid "" +"This guide assumes you have completed the :doc:`tutorial-quickstart-" +"docker-compose` tutorial. It is highly recommended that you follow and " +"understand the contents of that tutorial before proceeding with this " +"guide." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:20 +msgid "Before you begin, make sure you have the following prerequisites:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:23 +msgid "The Docker daemon is running on your local machine and the remote machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:24 +msgid "" +"Docker Compose V2 is installed on both your local machine and the remote " +"machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:25 +msgid "You can connect to the remote machine from your local machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:26 +msgid "Ports ``9091`` and ``9093`` are accessible on the remote machine." 
+msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:30 +msgid "" +"The guide uses the |quickstart_sklearn_tabular|_ example as an example " +"project." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:32 +msgid "" +"If your project has a different name or location, please remember to " +"adjust the commands/paths accordingly." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:36 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker.rst:19 +msgid "Step 1: Set Up" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:38 +msgid "Clone the Flower repository and change to the ``distributed`` directory:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:45 +msgid "Get the IP address from the remote machine and save it for later." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:46 +msgid "" +"Use the ``certs.yml`` Compose file to generate your own self-signed " +"certificates. If you have certificates, you can continue with Step 2." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:51 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:221 +msgid "These certificates should be used only for development purposes." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:53 +msgid "" +"For production environments, you may have to use dedicated services to " +"obtain your certificates." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:56 +msgid "" +"First, set the environment variables ``SUPERLINK_IP`` and " +"``SUPEREXEC_IP`` with the IP address from the remote machine. 
For " +"example, if the IP is ``192.168.2.33``, execute:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:65 +msgid "Next, generate the self-signed certificates:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:72 +msgid "Step 2: Copy the Server Compose Files" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:74 +msgid "" +"Use the method that works best for you to copy the ``server`` directory, " +"the certificates, and your Flower project to the remote machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:77 +msgid "For example, you can use ``scp`` to copy the directories:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:87 +msgid "Step 3: Start the Flower Server Components" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:89 +msgid "" +"Log into the remote machine using ``ssh`` and run the following command " +"to start the SuperLink and SuperExec services:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:102 +msgid "" +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``server`` Docker Compose files." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:105 +msgid "Go back to your terminal on your local machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:108 +#, fuzzy +msgid "Step 4: Start the Flower Client Components" +msgstr "서버(SuperLink)" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:110 +msgid "" +"On your local machine, run the following command to start the client " +"components:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:120 +msgid "" +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``client`` Docker Compose files." 
+msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:124 +#, fuzzy +msgid "Step 5: Run Your Flower Project" +msgstr "Flower SuperNode를 실행합니다." + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:126 +msgid "" +"Specify the remote SuperExec IP addresses and the path to the root " +"certificate in the ``[tool.flwr.federations.remote-superexec]`` table in " +"the ``pyproject.toml`` file. Here, we have named our remote federation " +"``remote-superexec``:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:130 +msgid "examples/quickstart-sklearn-tabular/pyproject.toml" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:139 +msgid "" +"The Path of the ``root-certificates`` should be relative to the location " +"of the ``pyproject.toml`` file." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:142 +msgid "To run the project, execute:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:148 +msgid "" +"That's it! With these steps, you've set up Flower on two separate " +"machines and are ready to start using it." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:152 +msgid "Step 6: Clean Up" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:154 +#, fuzzy +msgid "Shut down the Flower client components:" +msgstr "Flower 클라이언트 앱을 실행합니다." + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:161 +msgid "Shut down the Flower server components and delete the SuperLink state:" +msgstr "" + #: ../../source/docker/tutorial-quickstart-docker.rst:2 #, fuzzy msgid "Quickstart with Docker" @@ -3206,12 +3431,7 @@ msgid "" " understanding the basic workflow that uses the minimum configurations." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:21 -#: ../../source/docker/tutorial-quickstart-docker.rst:19 -msgid "Step 1: Set Up" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:31 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:32 #: ../../source/docker/tutorial-quickstart-docker.rst:21 msgid "Create a new Flower project (PyTorch):" msgstr "" @@ -3233,7 +3453,7 @@ msgstr "" msgid "Step 2: Start the SuperLink" msgstr "서버(SuperLink)" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:60 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:62 #: ../../source/docker/tutorial-quickstart-docker.rst:52 msgid "Open your terminal and run:" msgstr "" @@ -3259,8 +3479,8 @@ msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst:71 #: ../../source/docker/tutorial-quickstart-docker.rst:108 -#: ../../source/docker/tutorial-quickstart-docker.rst:215 -#: ../../source/docker/tutorial-quickstart-docker.rst:304 +#: ../../source/docker/tutorial-quickstart-docker.rst:219 +#: ../../source/docker/tutorial-quickstart-docker.rst:309 msgid "" "``--network flwr-network``: Make the container join the network named " "``flwr-network``." @@ -3272,8 +3492,8 @@ msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst:73 #: ../../source/docker/tutorial-quickstart-docker.rst:110 -#: ../../source/docker/tutorial-quickstart-docker.rst:216 -#: ../../source/docker/tutorial-quickstart-docker.rst:306 +#: ../../source/docker/tutorial-quickstart-docker.rst:220 +#: ../../source/docker/tutorial-quickstart-docker.rst:311 msgid "" "``--detach``: Run the container in the background, freeing up the " "terminal." @@ -3389,13 +3609,13 @@ msgid "" "extends the ClientApp image and installs the required dependencies." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:148 +#: ../../source/docker/tutorial-quickstart-docker.rst:149 msgid "" "Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste " "the following code into it:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:150 +#: ../../source/docker/tutorial-quickstart-docker.rst:152 #, fuzzy msgid "Dockerfile.clientapp" msgstr "flower 클라이언트 앱" @@ -3480,7 +3700,7 @@ msgstr "" msgid "the default command run when the container is started." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:184 +#: ../../source/docker/tutorial-quickstart-docker.rst:186 msgid "" "Note that `flwr `__ is already installed " "in the ``flwr/clientapp`` base image, so only other package dependencies " @@ -3489,7 +3709,7 @@ msgid "" "after it has been copied into the Docker image (see line 5)." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:189 +#: ../../source/docker/tutorial-quickstart-docker.rst:192 #, fuzzy msgid "" "Next, build the ClientApp Docker image by running the following command " @@ -3498,7 +3718,7 @@ msgstr "" "다음으로, Docker파일과 ServerApp 코드가 있는 디렉터리에서 다음 명령을 실행하여 ServerApp Docker 이미지를" " 빌드합니다." -#: ../../source/docker/tutorial-quickstart-docker.rst:198 +#: ../../source/docker/tutorial-quickstart-docker.rst:201 #, fuzzy msgid "" "The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. " @@ -3508,7 +3728,7 @@ msgstr "" "이미지에``flwr_serverapp``이라는 이름을 붙이고 ``0.0.1``이라는 태그를 붙였습니다. 여기서 선택한 값은 예시일 " "뿐이라는 점을 기억하세요. 필요에 따라 변경할 수 있습니다." -#: ../../source/docker/tutorial-quickstart-docker.rst:201 +#: ../../source/docker/tutorial-quickstart-docker.rst:205 msgid "Start the first ClientApp container:" msgstr "" @@ -3529,34 +3749,34 @@ msgstr "" msgid "``supernode-1:9094``." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:222 +#: ../../source/docker/tutorial-quickstart-docker.rst:226 msgid "Start the second ClientApp container:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:233 +#: ../../source/docker/tutorial-quickstart-docker.rst:237 msgid "Step 5: Start the SuperExec" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:235 +#: ../../source/docker/tutorial-quickstart-docker.rst:239 #, fuzzy msgid "" "The procedure for building and running a SuperExec image is almost " "identical to the ClientApp image." msgstr "ServerApp 이미지를 빌드하고 실행하는 절차는 SuperNode 이미지와 거의 동일합니다." -#: ../../source/docker/tutorial-quickstart-docker.rst:237 +#: ../../source/docker/tutorial-quickstart-docker.rst:242 msgid "" "Similar to the ClientApp image, you will need to create a Dockerfile that" " extends the SuperExec image and installs the required FAB dependencies." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:240 +#: ../../source/docker/tutorial-quickstart-docker.rst:245 msgid "" "Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste " "the following code in:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:242 +#: ../../source/docker/tutorial-quickstart-docker.rst:248 msgid "Dockerfile.superexec" msgstr "" @@ -3586,13 +3806,13 @@ msgstr "" msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:277 +#: ../../source/docker/tutorial-quickstart-docker.rst:283 msgid "" "Afterward, in the directory that holds the Dockerfile, execute this " "Docker command to build the SuperExec image:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:285 +#: ../../source/docker/tutorial-quickstart-docker.rst:290 msgid "Start the SuperExec container:" msgstr "" @@ -3606,7 +3826,7 @@ msgid "" "``http://localhost:9093``." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:305 +#: ../../source/docker/tutorial-quickstart-docker.rst:310 msgid "``--name superexec``: Assign the name ``superexec`` to the container." msgstr "" @@ -3627,78 +3847,78 @@ msgstr "" msgid "connect to the SuperLink running on port ``9091``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:315 +#: ../../source/docker/tutorial-quickstart-docker.rst:320 msgid "Step 6: Run the Quickstart Project" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:317 +#: ../../source/docker/tutorial-quickstart-docker.rst:322 #, fuzzy msgid "Add the following lines to the ``pyproject.toml``:" msgstr "``pyproject.toml``에 다음 버전 제약 조건을 설정했는지 확인하세요:" -#: ../../source/docker/tutorial-quickstart-docker.rst:326 +#: ../../source/docker/tutorial-quickstart-docker.rst:331 msgid "Run the ``quickstart-docker`` project by executing the command:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:332 +#: ../../source/docker/tutorial-quickstart-docker.rst:337 msgid "Follow the SuperExec logs to track the execution of the run:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:339 +#: ../../source/docker/tutorial-quickstart-docker.rst:344 msgid "Step 7: Update the Application" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:341 +#: ../../source/docker/tutorial-quickstart-docker.rst:346 msgid "" -"Change the application code. For example, change the ``seed`` in " +"Change the application code. For example, change the ``seed`` in " "``quickstart_docker/task.py`` to ``43`` and save it:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:344 +#: ../../source/docker/tutorial-quickstart-docker.rst:349 msgid "quickstart_docker/task.py" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:351 +#: ../../source/docker/tutorial-quickstart-docker.rst:356 #, fuzzy msgid "Stop the current ClientApp containers:" msgstr "현재 클라이언트 속성입니다." 
-#: ../../source/docker/tutorial-quickstart-docker.rst:357 +#: ../../source/docker/tutorial-quickstart-docker.rst:362 #, fuzzy msgid "Rebuild the FAB and ClientApp image:" msgstr "기본 이미지 빌드" -#: ../../source/docker/tutorial-quickstart-docker.rst:363 +#: ../../source/docker/tutorial-quickstart-docker.rst:368 msgid "Launch two new ClientApp containers based on the newly built image:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:378 +#: ../../source/docker/tutorial-quickstart-docker.rst:383 msgid "Run the updated project:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:385 +#: ../../source/docker/tutorial-quickstart-docker.rst:390 msgid "Step 8: Clean Up" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:387 +#: ../../source/docker/tutorial-quickstart-docker.rst:392 msgid "Remove the containers and the bridge network:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:401 -#: ../../source/docker/tutorial-quickstart-docker.rst:399 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:408 +#: ../../source/docker/tutorial-quickstart-docker.rst:404 #, fuzzy msgid "Where to Go Next" msgstr "시작 위치" -#: ../../source/docker/tutorial-quickstart-docker.rst:401 +#: ../../source/docker/tutorial-quickstart-docker.rst:406 msgid ":doc:`enable-tls`" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:402 +#: ../../source/docker/tutorial-quickstart-docker.rst:407 msgid ":doc:`persist-superlink-state`" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:403 +#: ../../source/docker/tutorial-quickstart-docker.rst:408 msgid ":doc:`tutorial-quickstart-docker-compose`" msgstr "" @@ -3721,179 +3941,179 @@ msgid "" "configuration that best suits your project's needs." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:23 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:24 msgid "Clone the Docker Compose ``complete`` directory:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:37 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:38 msgid "" "Export the path of the newly created project. The path should be relative" " to the location of the Docker Compose files:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:44 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:45 msgid "" "Setting the ``PROJECT_DIR`` helps Docker Compose locate the " "``pyproject.toml`` file, allowing it to install dependencies in the " "SuperExec and SuperNode images correctly." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:48 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:49 #, fuzzy msgid "Step 2: Run Flower in Insecure Mode" msgstr "Flower SuperNode를 실행합니다." -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:50 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:51 msgid "" "To begin, start Flower with the most basic configuration. In this setup, " "Flower will run without TLS and without persisting the state." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:55 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:56 msgid "" "Without TLS, the data sent between the services remains **unencrypted**. " "Use it only for development purposes." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:58 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:59 msgid "" "For production-oriented use cases, :ref:`enable TLS` for secure data" " transmission." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:68 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:179 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:184 #, fuzzy msgid "``docker compose``: The Docker command to run the Docker Compose tool." msgstr "``docker run``: 새 Docker 컨테이너를 실행하는 명령입니다." -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:69 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:180 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 msgid "" "``-f compose.yml``: Specify the YAML file that contains the basic Flower " "service definitions." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:72 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:190 msgid "" "``--build``: Rebuild the images for each service if they don't already " "exist." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:186 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:73 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:191 msgid "" "``-d``: Detach the containers from the terminal and run them in the " "background." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:74 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 msgid "Step 3: Run the Quickstart Project" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:78 msgid "" "Now that the Flower services have been started via Docker Compose, it is " "time to run the quickstart example." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:79 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:81 msgid "" "To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify" " the SuperExec addresses in the ``pyproject.toml`` file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:82 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:232 msgid "Add the following lines to the ``quickstart-compose/pyproject.toml``:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:228 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:86 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:234 msgid "quickstart-compose/pyproject.toml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:91 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:93 msgid "Execute the command to run the quickstart example:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:97 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:99 msgid "Monitor the SuperExec logs and wait for the summary to appear:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:104 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 msgid "Step 4: Update the Application" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 msgid "In the next step, change the application code." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:110 msgid "" "For example, go to the ``task.py`` file in the ``quickstart-" "compose/quickstart_compose/`` directory and add a ``print`` call in the " "``get_weights`` function:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:111 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:114 msgid "quickstart-compose/quickstart_compose/task.py" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:120 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:125 #, fuzzy msgid "Rebuild and restart the services." msgstr "이미 *서버*를 시작할 수 있습니다:" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:124 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 msgid "" "If you have modified the dependencies listed in your ``pyproject.toml`` " "file, it is essential to rebuild images." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:127 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:132 msgid "If you haven't made any changes, you can skip this step." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:134 msgid "Run the following command to rebuild and restart the services:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:135 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:140 msgid "Run the updated quickstart example:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:142 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:147 msgid "In the SuperExec logs, you should find the ``Get weights`` line:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:159 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:164 msgid "Step 5: Persisting the SuperLink State" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:161 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 msgid "" "In this step, Flower services are configured to persist the state of the " "SuperLink service, ensuring that it maintains its state even after a " "restart." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 msgid "" "When working with Docker Compose on Linux, you may need to create the " "``state`` directory first and change its ownership to ensure proper " "access and permissions." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:169 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:174 msgid "" "For more information, consult the following page: :doc:`persist-" "superlink-state`." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:220 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:176 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 msgid "Run the command:" msgstr "" @@ -3914,17 +4134,17 @@ msgid "" "rules>`_." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:188 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:193 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:247 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:375 msgid "Rerun the ``quickstart-compose`` project:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:194 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:199 msgid "Check the content of the ``state`` directory:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:201 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:206 msgid "" "You should see a ``state.db`` file in the ``state`` directory. If you " "restart the service, the state file will be used to restore the state " @@ -3932,125 +4152,121 @@ msgid "" "if the containers are stopped and started again." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:208 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:214 msgid "Step 6: Run Flower with TLS" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:210 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:216 msgid "" "To demonstrate how to enable TLS, generate self-signed certificates using" " the ``certs.yml`` Compose file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:215 -msgid "These certificates should be used only for development purposes." 
-msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:217 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:223 msgid "" "For production environments, use a service like `Let's Encrypt " "`_ to obtain your certificates." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:235 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 msgid "Restart the services with TLS enabled:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:249 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:255 msgid "Step 7: Add another SuperNode" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:251 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 msgid "" "You can add more SuperNodes and ClientApps by duplicating their " "definitions in the ``compose.yml`` file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:254 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:260 msgid "" "Just give each new SuperNode and ClientApp service a unique service name " "like ``supernode-3``, ``clientapp-3``, etc." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:263 msgid "In ``compose.yml``, add the following:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:259 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:265 msgid "compose.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:310 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:316 msgid "" "If you also want to enable TLS for the new SuperNodes, duplicate the " "SuperNode definition for each new SuperNode service in the ``with-" "tls.yml`` file." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:313 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:319 msgid "" "Make sure that the names of the services match with the one in the " "``compose.yml`` file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:315 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:321 msgid "In ``with-tls.yml``, add the following:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:317 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:323 msgid "with-tls.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:339 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:345 msgid "Step 8: Persisting the SuperLink State and Enabling TLS" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:341 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:347 msgid "" "To run Flower with persisted SuperLink state and enabled TLS, a slight " "change in the ``with-state.yml`` file is required:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:344 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:350 msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:346 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:352 msgid "with-state.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:363 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 #, fuzzy msgid "Restart the services:" msgstr "이미 *서버*를 시작할 수 있습니다:" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:377 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:383 msgid "Step 9: Merge Multiple Compose Files" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:379 +#: 
../../source/docker/tutorial-quickstart-docker-compose.rst:385 msgid "" "You can merge multiple Compose files into a single file. For instance, if" " you wish to combine the basic configuration with the TLS configuration, " "execute the following command:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:387 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:394 msgid "" "This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" " a new file called ``my_compose.yml``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:391 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:398 msgid "Step 10: Clean Up" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:393 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:400 #, fuzzy msgid "Remove all services and volumes:" msgstr "R에서 모든 항목을 제거합니다." -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:403 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:410 #, fuzzy msgid ":doc:`run-quickstart-examples-docker-compose`" msgstr "빠른 시작 튜토리얼" @@ -4069,7 +4285,7 @@ msgstr "" "다른 버전의 Flower를 사용하려면 태그를 변경하여 사용할 수 있습니다(예: Flower nightly). 사용 가능한 모든 " "버전은 `Docker Hub `__에 있습니다." -#: ../../source/docker/use-a-different-version.rst:9 +#: ../../source/docker/use-a-different-version.rst:10 #, fuzzy msgid "" "When using Flower nightly, the SuperLink nightly image must be paired " @@ -4104,34 +4320,35 @@ msgstr "" "연합식으로 ` 와 비교했을 때 몇 가지 사항만 " "변경 하면 됩니다." 
-#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:10 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:12 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 msgid "Centralized Training" msgstr "중앙 집중식 훈련" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:14 +#, fuzzy msgid "" "All files are revised based on :doc:`Example: PyTorch - From Centralized " "To Federated `. The only " -"thing to do is modifying the file called :code:`cifar.py`, revised part " -"is shown below:" +"thing to do is modifying the file called ``cifar.py``, revised part is " +"shown below:" msgstr "" "모든 파일은 :doc:`예제: 파이토치 -중앙 집중식에서 연합식으로 `를 기반으로 수정합니다. :code:`cifar.py`라는 파일을 수정하기만 하면 되며, 수정된 부분은 " "아래와 같습니다:" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:18 msgid "" "The model architecture defined in class Net() is added with Batch " "Normalization layers accordingly." msgstr "Net() 클래스에 정의된 모델 아키텍처는 그에 따라 배치 정규화 레이어가 추가됩니다." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:41 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:157 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:171 msgid "You can now run your machine learning workload:" msgstr "이제 머신 러닝 워크로드를 실행할 수 있습니다:" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 msgid "" "So far this should all look fairly familiar if you've used PyTorch " "before. 
Let's take the next step and use what we've built to create a " @@ -4141,19 +4358,20 @@ msgstr "" "지금까지는 파이토치를 사용해 본 적이 있다면 상당히 익숙하게 보일 것입니다. 다음 단계로 넘어가서 우리가 구축한 것을 사용하여 " "FedBN 내에서 하나의 서버와 두 개의 클라이언트로 구성된 연합학습 시스템을 만들어 보겠습니다." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:167 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:58 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:182 msgid "Federated Training" msgstr "연합 훈련" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:60 +#, fuzzy msgid "" "If you have read :doc:`Example: PyTorch - From Centralized To Federated " "`, the following parts are" -" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " -"function in :code:`client.py` needed to revise. If not, please read the " -":doc:`Example: PyTorch - From Centralized To Federated `. first." +" easy to follow, only ``get_parameters`` and ``set_parameters`` function " +"in ``client.py`` needed to revise. If not, please read the :doc:`Example:" +" PyTorch - From Centralized To Federated `. first." msgstr "" ":doc:`예제: 파이토치 - 중앙 집중식에서 연합식으로 `를 읽었다면, 다음 부분은 쉽게 따라할 수 있으며 :code:`client.py`의 " @@ -4161,30 +4379,32 @@ msgstr "" ":doc:`예제: 파이토치 - 중앙 집중식에서 연합식으로 `를 먼저 읽어보세요." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:66 +#, fuzzy msgid "" "Our example consists of one *server* and two *clients*. In FedBN, " -":code:`server.py` keeps unchanged, we can start the server directly." +"``server.py`` keeps unchanged, we can start the server directly." msgstr "" "이 예제는 하나의 *서버*와 두 개의 *클라이언트*로 구성됩니다. FedBN에서 :code:`server.py`는 변경되지 않고 " "그대로 유지되므로 서버를 바로 시작할 수 있습니다." 
-#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:73 +#, fuzzy msgid "" -"Finally, we will revise our *client* logic by changing " -":code:`get_parameters` and :code:`set_parameters` in :code:`client.py`, " -"we will exclude batch normalization parameters from model parameter list " -"when sending to or receiving from the server." +"Finally, we will revise our *client* logic by changing ``get_parameters``" +" and ``set_parameters`` in ``client.py``, we will exclude batch " +"normalization parameters from model parameter list when sending to or " +"receiving from the server." msgstr "" "마지막으로, :code:`client.py`에서 :code:`get_parameters` 및 " ":code:`set_parameters`를 변경하여 *client* 로직을 수정할 것입니다. 서버로 보내거나 서버에서 받을 때 모델" " 파라미터 목록에서 배치 정규화 파라미터를 제외할 수 있습니다." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:102 msgid "Now, you can now open two additional terminal windows and run" msgstr "이제 두 개의 터미널 창을 추가로 열고 다음을 실행할 수 있습니다" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:108 msgid "" "in each window (make sure that the server is still running before you do " "so) and see your (previously centralized) PyTorch project run federated " @@ -4193,13 +4413,13 @@ msgstr "" "를 입력하고(클릭하기 전에 서버가 계속 실행 중인지 확인하세요), (이전에 중앙 집중된) PyTorch 프로젝트가 두 클라이언트에서" " FedBN으로 연합 학습을 실행하는 것을 확인합니다. 축하합니다!" 
-#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:310 -#: ../../source/tutorial-quickstart-jax.rst:283 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:113 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:349 +#: ../../source/tutorial-quickstart-jax.rst:319 msgid "Next Steps" msgstr "다음 단계" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:115 msgid "" "The full source code for this example can be found `here " "`_을 " "참조하세요." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:15 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:18 +#, fuzzy msgid "" -"Let's create a new file called :code:`cifar.py` with all the components " +"Let's create a new file called ``cifar.py`` with all the components " "required for a traditional (centralized) training on CIFAR-10. First, all" -" required packages (such as :code:`torch` and :code:`torchvision`) need " -"to be imported. You can see that we do not import any package for " -"federated learning. You can keep all these imports as they are even when " -"we add the federated learning components at a later point." +" required packages (such as ``torch`` and ``torchvision``) need to be " +"imported. You can see that we do not import any package for federated " +"learning. You can keep all these imports as they are even when we add the" +" federated learning components at a later point." msgstr "" "CIFAR-10에 대한 기존 (중앙 집중식) 교육에 필요한 모든 구성 요소가 포함된 :code:`cifar.py`라는 새 파일을 " "생성해 보겠습니다. 먼저, 필요한 모든 패키지(예: :code:`torch` 및 :code:`torchvision`)를 가져와야 " "합니다. 연합 학습을 위한 패키지를 가져오지 않는 것을 확인 할 수 있습니. 나중에 연합 학습 구성 요소를 추가할 때에도 이러한 " "모든 가져오기를 그대로 유지할 수 있습니다." 
-#: ../../source/example-pytorch-from-centralized-to-federated.rst:32 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:36 +#, fuzzy msgid "" "As already mentioned we will use the CIFAR-10 dataset for this machine " "learning workload. The model architecture (a very simple Convolutional " -"Neural Network) is defined in :code:`class Net()`." +"Neural Network) is defined in ``class Net()``." msgstr "" "이미 언급했듯이 이 머신 러닝 워크로드에는 CIFAR-10 데이터 세트를 사용합니다. 모델 아키텍처(매우 간단한 컨볼루션 신경망)는" " :code:`class Net()`에 정의되어 있습니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:62 +#, fuzzy msgid "" -"The :code:`load_data()` function loads the CIFAR-10 training and test " -"sets. The :code:`transform` normalized the data after loading." +"The ``load_data()`` function loads the CIFAR-10 training and test sets. " +"The ``transform`` normalized the data after loading." msgstr "" ":code:`load_data()` 함수는 CIFAR-10 훈련 및 테스트 세트를 로드합니다. :code:`transform`은 " "로드 후 데이터를 정규화합니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:74 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:84 +#, fuzzy msgid "" -"We now need to define the training (function :code:`train()`) which loops" -" over the training set, measures the loss, backpropagates it, and then " +"We now need to define the training (function ``train()``) which loops " +"over the training set, measures the loss, backpropagates it, and then " "takes one optimizer step for each batch of training examples." msgstr "" "이제 학습 집합을 반복하고, 손실을 측정하고, 이를 역전파한 다음 각 학습 예제 배치에 대해 하나의 최적화 단계를 수행하는 " "학습(함수 :code:`train()`)을 정의해야 합니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:76 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:88 +#, fuzzy msgid "" -"The evaluation of the model is defined in the function :code:`test()`. 
" -"The function loops over all test samples and measures the loss of the " -"model based on the test dataset." +"The evaluation of the model is defined in the function ``test()``. The " +"function loops over all test samples and measures the loss of the model " +"based on the test dataset." msgstr "" "모델 평가는 :code:`test()` 함수에 정의되어 있습니다. 이 함수는 모든 테스트 샘플을 반복하고 테스트 데이터 세트에 따라" " 모델의 손실을 측정합니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:136 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:149 msgid "" "Having defined the data loading, model architecture, training, and " "evaluation we can put everything together and train our CNN on CIFAR-10." msgstr "데이터 로딩, 모델 아키텍처, 훈련 및 평가를 정의했으면 모든 것을 종합하여 CIFAR-10에서 CNN을 훈련할 수 있습니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:163 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:177 msgid "" "So far, this should all look fairly familiar if you've used PyTorch " "before. Let's take the next step and use what we've built to create a " @@ -4313,7 +4538,7 @@ msgstr "" "지금까지는 파이토치를 사용해 본 적이 있다면 상당히 익숙하게 보일 것입니다. 다음 단계로 넘어가서 구축한 것을 사용하여 하나의 " "서버와 두 개의 클라이언트로 구성된 간단한 연합 학습 시스템을 만들어 보겠습니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:169 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:184 msgid "" "The simple machine learning project discussed in the previous section " "trains the model on a single dataset (CIFAR-10), we call this centralized" @@ -4328,17 +4553,18 @@ msgstr "" "것입니다. 일반적으로 머신 러닝 워크로드를 연합 방식으로 실행하려면 대부분의 코드를 변경하고 모든 것을 처음부터 다시 설정해야 " "합니다. 이는 상당한 노력이 필요할 수 있습니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:173 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 msgid "" "However, with Flower you can evolve your pre-existing code into a " "federated learning setup without the need for a major rewrite." 
msgstr "하지만 Flower를 사용하면 대대적인 재작성 없이도 기존 코드를 연합 학습 설정으로 발전시킬 수 있습니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:175 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:194 +#, fuzzy msgid "" "The concept is easy to understand. We have to start a *server* and then " -"use the code in :code:`cifar.py` for the *clients* that are connected to " -"the *server*. The *server* sends model parameters to the clients. The " +"use the code in ``cifar.py`` for the *clients* that are connected to the " +"*server*. The *server* sends model parameters to the clients. The " "*clients* run the training and update the parameters. The updated " "parameters are sent back to the *server* which averages all received " "parameter updates. This describes one round of the federated learning " @@ -4349,46 +4575,48 @@ msgstr "" "업데이트된 파라미터는 *서버*로 다시 전송되며, *서버*는 수신된 모든 파라미터 업데이트의 평균을 구합니다. 이것은 연합 학습 " "프로세스의 한 라운드를 설명하며 여러 라운드에 걸쳐 이 과정을 반복합니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:181 -#: ../../source/tutorial-quickstart-jax.rst:129 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:201 +#: ../../source/tutorial-quickstart-jax.rst:147 +#, fuzzy msgid "" "Our example consists of one *server* and two *clients*. Let's set up " -":code:`server.py` first. The *server* needs to import the Flower package " -":code:`flwr`. Next, we use the :code:`start_server` function to start a " -"server and tell it to perform three rounds of federated learning." +"``server.py`` first. The *server* needs to import the Flower package " +"``flwr``. Next, we use the ``start_server`` function to start a server " +"and tell it to perform three rounds of federated learning." msgstr "" "이 예제는 하나의 *서버*와 두 개의 *클라이언트*로 구성됩니다. 먼저 :code:`server.py`를 설정해 보겠습니다. " "*server*는 Flower 패키지 :code:`flwr`를 가져와야 합니다. 다음으로, :code:`start_server` " "함수를 사용하여 서버를 시작하고 세 차례의 연합 학습을 수행하도록 지시합니다." 
-#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 -#: ../../source/tutorial-quickstart-jax.rst:139 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:215 +#: ../../source/tutorial-quickstart-jax.rst:161 msgid "We can already start the *server*:" msgstr "이미 *서버*를 시작할 수 있습니다:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:197 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 +#, fuzzy msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined centralized training in :code:`cifar.py`. " -"Our *client* needs to import :code:`flwr`, but also :code:`torch` to " -"update the parameters on our PyTorch model:" +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined centralized training in ``cifar.py``. Our " +"*client* needs to import ``flwr``, but also ``torch`` to update the " +"parameters on our PyTorch model:" msgstr "" "마지막으로, :code:`client.py`에서 *client* 로직을 정의하고 :code:`cifar.py`에서 이전에 정의한 " "중앙 집중식 학습을 기반으로 구축합니다. *클라이언트*는 :code:`flwr`을 가져와야 하며, PyTorch 모델의 파라미터를 " "업데이트하기 위해 :code:`torch`도 가져와야 합니다:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:213 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:238 +#, fuzzy msgid "" "Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" -" to implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " -"some of the boilerplate that would otherwise be necessary. 
" -":code:`CifarClient` needs to implement four methods, two methods for " -"getting/setting model parameters, one method for training the model, and " -"one method for testing the model:" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``CifarClient``. ``NumPyClient`` is slightly easier to implement " +"than ``Client`` if you use a framework with good NumPy interoperability " +"(like PyTorch or TensorFlow/Keras) because it avoids some of the " +"boilerplate that would otherwise be necessary. ``CifarClient`` needs to " +"implement four methods, two methods for getting/setting model parameters," +" one method for training the model, and one method for testing the model:" msgstr "" "Flower *클라이언트*를 구현한다는 것은 기본적으로 :code:`flwr.client.Client` 또는 " ":code:`flwr.client.NumPyClient`의 서브클래스를 구현하는 것을 의미합니다. 우리의 구현은 " @@ -4398,112 +4626,119 @@ msgstr "" "code:`CifarClient`는 모델 파라미터를 가져오거나 설정하는 메서드 2개, 모델 학습을 위한 메서드 1개, 모델 테스트를" " 위한 메서드 1개 등 네 가지 메서드를 구현해야 합니다:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -msgid ":code:`set_parameters`" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#, fuzzy +msgid "``set_parameters``" msgstr ":code:`set_parameters`" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -#: ../../source/tutorial-quickstart-jax.rst:166 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:248 +#: ../../source/tutorial-quickstart-jax.rst:192 msgid "" "set the model parameters on the local model that are received from the " "server" msgstr "서버에서 수신한 로컬 모델의 모델 파라미터를 설정합니다" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:220 -#: ../../source/tutorial-quickstart-jax.rst:168 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#: ../../source/tutorial-quickstart-jax.rst:194 +#, fuzzy msgid "" -"loop over the list of model parameters received 
as NumPy " -":code:`ndarray`'s (think list of neural network layers)" +"loop over the list of model parameters received as NumPy ``ndarray``'s " +"(think list of neural network layers)" msgstr "(신경망 레이어 목록으로 생각하면 됩니다) NumPy :code:`ndarray`로 받은 모델 파라미터 목록에 대해 반복합니다" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 -#: ../../source/tutorial-quickstart-jax.rst:169 -#: ../../source/tutorial-quickstart-scikitlearn.rst:118 -msgid ":code:`get_parameters`" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 +#: ../../source/tutorial-quickstart-scikitlearn.rst:129 +#, fuzzy +msgid "``get_parameters``" msgstr ":code:`get_parameters`" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:222 -#: ../../source/tutorial-quickstart-jax.rst:170 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 +#, fuzzy msgid "" -"get the model parameters and return them as a list of NumPy " -":code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" +"get the model parameters and return them as a list of NumPy ``ndarray``'s" +" (which is what ``flwr.client.NumPyClient`` expects)" msgstr "" "모델 매개변수를 가져와서 NumPy :code:`ndarray`의 목록으로 반환합니다(이는 " ":code:`flwr.client.NumPyClient`가 기대하는 바와 같습니다)" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid ":code:`fit`" -msgstr ":code:`fit`" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 +#, fuzzy +msgid "``fit``" +msgstr "``fit``" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:255 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:228 -#: 
../../source/tutorial-quickstart-jax.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:176 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:255 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:260 +#: ../../source/tutorial-quickstart-jax.rst:200 +#: ../../source/tutorial-quickstart-jax.rst:205 msgid "" "update the parameters of the local model with the parameters received " "from the server" msgstr "서버에서 받은 파라미터로 로컬 모델의 파라미터를 업데이트합니다" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 msgid "train the model on the local training set" msgstr "로컬 훈련 세트에서 모델을 훈련합니다" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:226 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:258 msgid "get the updated local model weights and return them to the server" msgstr "업데이트된 로컬 모델 가중치를 가져와 서버로 반환합니다" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -#: ../../source/tutorial-quickstart-jax.rst:178 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid ":code:`evaluate`" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 +#: ../../source/tutorial-quickstart-jax.rst:208 +#: ../../source/tutorial-quickstart-scikitlearn.rst:139 +#, fuzzy +msgid "``evaluate``" msgstr ":code:`evaluate`" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:229 -#: ../../source/tutorial-quickstart-jax.rst:177 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:262 +#: ../../source/tutorial-quickstart-jax.rst:207 msgid "evaluate the updated model on the local test set" msgstr "로컬 테스트 세트에서 업데이트된 모델을 평가합니다" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 msgid "return the local loss 
and accuracy to the server" msgstr "로컬 손실 및 정확도를 서버에 반환합니다" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:232 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:265 +#, fuzzy msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`test()` previously " -"defined in :code:`cifar.py`. So what we really do here is we tell Flower " -"through our :code:`NumPyClient` subclass which of our already defined " -"functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``test()`` previously defined in ``cifar.py``. " +"So what we really do here is we tell Flower through our ``NumPyClient`` " +"subclass which of our already defined functions to call for training and " +"evaluation. We included type annotations to give you a better " +"understanding of the data types that get passed around." msgstr "" "두 개의 :code:`NumPyClient` 메서드인 :code:`fit`과 :code:`evaluate`는 이전에 " ":code:`cifar.py`에 정의된 함수인 :code:`train()`과 :code:`test()`를 활용합니다. 따라서 여기서" " 실제로 하는 일은 :code:`NumPyClient` 서브클래스를 통해 이미 정의된 함수 중 훈련과 평가를 위해 호출할 함수를 " "Flower에 알려주는 것입니다. 전달되는 데이터 유형을 더 잘 이해할 수 있도록 type annotations을 포함했습니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:280 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:315 +#, fuzzy msgid "" "All that's left to do it to define a function that loads both model and " -"data, creates a :code:`CifarClient`, and starts this client. You load " -"your data and model by using :code:`cifar.py`. Start :code:`CifarClient` " -"with the function :code:`fl.client.start_client()` by pointing it at the " -"same IP address we used in :code:`server.py`:" +"data, creates a ``CifarClient``, and starts this client. 
You load your " +"data and model by using ``cifar.py``. Start ``CifarClient`` with the " +"function ``fl.client.start_client()`` by pointing it at the same IP " +"address we used in ``server.py``:" msgstr "" "이제 모델과 데이터를 모두 로드하는 함수를 정의하고, :code:`CifarClient`를 생성하고, 이 클라이언트를 시작하는 " "작업만 남았습니다. 코드:`cifar.py`를 사용하여 데이터와 모델을 로드합니다. :code:`server.py`에서 사용한 것과" " 동일한 IP 주소를 지정하여 :code:`fl.client.start_client()` 함수로 " ":code:`CifarClient`를 시작합니다:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:301 -#: ../../source/tutorial-quickstart-jax.rst:274 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:338 +#: ../../source/tutorial-quickstart-jax.rst:309 msgid "And that's it. You can now open two additional terminal windows and run" msgstr "여기까지입니다. 이제 두 개의 터미널 창을 추가로 열고 다음을 실행할 수 있습니다" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:307 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:344 msgid "" "in each window (make sure that the server is running before you do so) " "and see your (previously centralized) PyTorch project run federated " @@ -4512,7 +4747,7 @@ msgstr "" "를 입력하고(그 전에 서버가 실행 중인지 확인하세요) (이전에는 중앙 집중식) PyTorch 프로젝트가 두 클라이언트에서 연합 " "학습을 실행하는 것을 확인합니다. 축하합니다!" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:312 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:351 msgid "" "The full source code for this example: `PyTorch: From Centralized To " "Federated (Code) `_를 확인하는 것이 좋습니다." -#: ../../source/how-to-authenticate-supernodes.rst:15 +#: ../../source/how-to-authenticate-supernodes.rst:20 msgid "" "This guide covers a preview feature that might change in future versions " "of Flower." msgstr "이 가이드에서는 향후 버전의 Flower에서 변경될 수 있는 미리보기 기능에 대해 설명합니다." 
-#: ../../source/how-to-authenticate-supernodes.rst:18 +#: ../../source/how-to-authenticate-supernodes.rst:24 msgid "" "For increased security, node authentication can only be used when " "encrypted connections (SSL/TLS) are enabled." msgstr "보안을 강화하기 위해 노드 인증은 암호화된 연결(SSL/TLS)을 사용하도록 설정한 경우에만 사용할 수 있습니다." -#: ../../source/how-to-authenticate-supernodes.rst:21 -msgid "Enable node authentication in :code:`SuperLink`" +#: ../../source/how-to-authenticate-supernodes.rst:28 +#, fuzzy +msgid "Enable node authentication in ``SuperLink``" msgstr ":code:`SuperLink`에서 노드 인증 활성화" -#: ../../source/how-to-authenticate-supernodes.rst:23 +#: ../../source/how-to-authenticate-supernodes.rst:30 +#, fuzzy msgid "" "To enable node authentication, first you need to configure SSL/TLS " "connections to secure the SuperLink<>SuperNode communication. You can " "find the complete guide `here `_. After configuring secure connections, you" -" can enable client authentication in a long-running Flower " -":code:`SuperLink`. Use the following terminal command to start a Flower " -":code:`SuperNode` that has both secure connections and node " -"authentication enabled:" +" can enable client authentication in a long-running Flower ``SuperLink``." +" Use the following terminal command to start a Flower ``SuperNode`` that " +"has both secure connections and node authentication enabled:" msgstr "" "노드 인증을 활성화하려면 먼저 SuperLink<>SuperNode 통신을 보호하기 위해 SSL/TLS 연결을 구성해야 합니다. " "전체 가이드는 `여기 `. Here's a nonsensical example that customizes :code:`FedAvg`" -" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" -" to the config dict of a *single client* (only the first client in the " -"list, the other clients in this round to not receive this \"special\" " -"config value):" +"strategies>`. 
Here's a nonsensical example that customizes ``FedAvg`` by " +"adding a custom ``\"hello\": \"world\"`` configuration key/value pair to " +"the config dict of a *single client* (only the first client in the list, " +"the other clients in this round to not receive this \"special\" config " +"value):" msgstr "" "이는 기존 전략을 사용자 지정하거나 :doc:`implementing a custom strategy from scratch " "`를 통해 수행할 수 있습니다. 다음은 사용자 지정 ``\"hello\"'를 " @@ -6156,27 +6405,29 @@ msgstr "" " 정보를 표시합니다:" #: ../../source/how-to-configure-logging.rst:13 +#, fuzzy msgid "" "containing relevant information including: log message level (e.g. " -":code:`INFO`, :code:`DEBUG`), a timestamp, the line where the logging " -"took place from, as well as the log message itself. In this way, the " -"logger would typically display information on your terminal as follows:" +"``INFO``, ``DEBUG``), a timestamp, the line where the logging took place " +"from, as well as the log message itself. In this way, the logger would " +"typically display information on your terminal as follows:" msgstr "" "로그 메시지 수준(예: :code:`INFO`, :code:`DEBUG`), 타임스탬프, 로깅이 발생한 줄, 로그 메시지 자체 등 " "관련 정보를 포함합니다. 이러한 방식으로 로거는 일반적으로 다음과 같은 정보를 터미널에 표시합니다:" -#: ../../source/how-to-configure-logging.rst:34 +#: ../../source/how-to-configure-logging.rst:35 msgid "Saving log to file" msgstr "파일에 로그 저장" -#: ../../source/how-to-configure-logging.rst:36 +#: ../../source/how-to-configure-logging.rst:37 +#, fuzzy msgid "" "By default, the Flower log is outputted to the terminal where you launch " "your Federated Learning workload from. This applies for both gRPC-based " -"federation (i.e. when you do :code:`fl.server.start_server`) and when " -"using the :code:`VirtualClientEngine` (i.e. when you do " -":code:`fl.simulation.start_simulation`). In some situations you might " -"want to save this log to disk. You can do so by calling the " +"federation (i.e. when you do ``fl.server.start_server``) and when using " +"the ``VirtualClientEngine`` (i.e. 
when you do " +"``fl.simulation.start_simulation``). In some situations you might want to" +" save this log to disk. You can do so by calling the " "`fl.common.logger.configure() " "`_" " function. For example:" @@ -6189,22 +6440,23 @@ msgstr "" "`_" " 함수를 호출하여 저장할 수 있습니다. 예를 들어:" -#: ../../source/how-to-configure-logging.rst:53 +#: ../../source/how-to-configure-logging.rst:59 +#, fuzzy msgid "" "With the above, Flower will record the log you see on your terminal to " -":code:`log.txt`. This file will be created in the same directory as were " -"you are running the code from. If we inspect we see the log above is also" -" recorded but prefixing with :code:`identifier` each line:" +"``log.txt``. This file will be created in the same directory as were you " +"are running the code from. If we inspect we see the log above is also " +"recorded but prefixing with ``identifier`` each line:" msgstr "" "위와 같이 하면 Flower는 터미널에 표시되는 로그를 :code:`log.txt`에 기록합니다. 이 파일은 코드를 실행한 " "디렉터리와 동일한 디렉터리에 생성됩니다. 검사해보면 위의 로그도 기록되지만 각 줄 앞에 :code:`identifier` 접두사가 " "붙는 것을 확인할 수 있습니다:" -#: ../../source/how-to-configure-logging.rst:74 +#: ../../source/how-to-configure-logging.rst:81 msgid "Log your own messages" msgstr "나만의 메시지 기록" -#: ../../source/how-to-configure-logging.rst:76 +#: ../../source/how-to-configure-logging.rst:83 msgid "" "You might expand the information shown by default with the Flower logger " "by adding more messages relevant to your application. You can achieve " @@ -6213,27 +6465,27 @@ msgstr "" "애플리케이션과 관련된 메시지를 더 추가하여 Flower 로거에 기본적으로 표시되는 정보를 확장할 수 있습니다. 다음과 같이 쉽게 " "추가할 수 있습니다." -#: ../../source/how-to-configure-logging.rst:102 +#: ../../source/how-to-configure-logging.rst:114 msgid "" "In this way your logger will show, in addition to the default messages, " "the ones introduced by the clients as specified above." msgstr "이렇게 하면 로거에 기본 메시지 외에 위에서 지정한 대로 클라이언트가 소개한 메시지가 표시됩니다." 
-#: ../../source/how-to-configure-logging.rst:128 +#: ../../source/how-to-configure-logging.rst:140 msgid "Log to a remote service" msgstr "원격 서비스에 로그인" -#: ../../source/how-to-configure-logging.rst:130 +#: ../../source/how-to-configure-logging.rst:142 +#, fuzzy msgid "" -"The :code:`fl.common.logger.configure` function, also allows specifying a" -" host to which logs can be pushed (via :code:`POST`) through a native " -"Python :code:`logging.handler.HTTPHandler`. This is a particularly useful" -" feature in :code:`gRPC`-based Federated Learning workloads where " -"otherwise gathering logs from all entities (i.e. the server and the " -"clients) might be cumbersome. Note that in Flower simulation, the server " -"automatically displays all logs. You can still specify a " -":code:`HTTPHandler` should you wish to backup or analyze the logs " -"somewhere else." +"The ``fl.common.logger.configure`` function, also allows specifying a " +"host to which logs can be pushed (via ``POST``) through a native Python " +"``logging.handler.HTTPHandler``. This is a particularly useful feature in" +" ``gRPC``-based Federated Learning workloads where otherwise gathering " +"logs from all entities (i.e. the server and the clients) might be " +"cumbersome. Note that in Flower simulation, the server automatically " +"displays all logs. You can still specify a ``HTTPHandler`` should you " +"wish to backup or analyze the logs somewhere else." msgstr "" "또한 :code:`fl.common.logger.configure` 함수를 사용하면 네이티브 Python " ":code:`logging.handler.HTTPHandler`를 통해 로그를 푸시할 수 있는 호스트를 지정할 수 " @@ -6246,15 +6498,16 @@ msgid "Enable SSL connections" msgstr "SSL 연결 사용" #: ../../source/how-to-enable-ssl-connections.rst:4 +#, fuzzy msgid "" "This guide describes how to a SSL-enabled secure Flower server " -"(:code:`SuperLink`) can be started and how a Flower client " -"(:code:`SuperNode`) can establish a secure connections to it." 
+"(``SuperLink``) can be started and how a Flower client (``SuperNode``) " +"can establish a secure connections to it." msgstr "" "이 가이드에서는 SSL을 지원하는 보안 Flower 서버(:코드:`SuperLink`)를 시작하는 방법과 Flower " "클라이언트(:코드:`SuperNode`)가 이 서버에 보안 연결을 설정하는 방법을 설명합니다." -#: ../../source/how-to-enable-ssl-connections.rst:7 +#: ../../source/how-to-enable-ssl-connections.rst:8 msgid "" "A complete code example demonstrating a secure connection can be found " "`here `_'에서 확인할 수 있습니다." -#: ../../source/how-to-enable-ssl-connections.rst:10 +#: ../../source/how-to-enable-ssl-connections.rst:11 +#, fuzzy msgid "" -"The code example comes with a :code:`README.md` file which explains how " -"to start it. Although it is already SSL-enabled, it might be less " +"The code example comes with a ``README.md`` file which explains how to " +"start it. Although it is already SSL-enabled, it might be less " "descriptive on how it does so. Stick to this guide for a deeper " "introduction to the topic." msgstr "" @@ -6279,26 +6533,29 @@ msgid "Certificates" msgstr "인증서" #: ../../source/how-to-enable-ssl-connections.rst:18 +#, fuzzy msgid "" "Using SSL-enabled connections requires certificates to be passed to the " "server and client. For the purpose of this guide we are going to generate" " self-signed certificates. As this can become quite complex we are going " -"to ask you to run the script in :code:`examples/advanced-" -"tensorflow/certificates/generate.sh` with the following command sequence:" +"to ask you to run the script in ``examples/advanced-" +"tensorflow/certificates/generate.sh`` with the following command " +"sequence:" msgstr "" "SSL 사용 연결을 사용하려면 서버와 클라이언트에 인증서를 전달해야 합니다. 이 가이드에서는 자체 서명된 인증서를 생성하겠습니다. 
" "이 과정은 상당히 복잡할 수 있으므로 다음 명령 시퀀스를 사용하여 :code:`examples/advanced-" "tensorflow/certificates/generate.sh`에서 스크립트를 실행하도록 요청하겠습니다:" #: ../../source/how-to-enable-ssl-connections.rst:29 +#, fuzzy msgid "" -"This will generate the certificates in :code:`examples/advanced-" -"tensorflow/.cache/certificates`." +"This will generate the certificates in ``examples/advanced-" +"tensorflow/.cache/certificates``." msgstr "" "이렇게 하면 :code:`examples/advanced-tensorflow/.cache/certificates`에 인증서가 " "생성됩니다." -#: ../../source/how-to-enable-ssl-connections.rst:31 +#: ../../source/how-to-enable-ssl-connections.rst:32 msgid "" "The approach for generating SSL certificates in the context of this " "example can serve as an inspiration and starting point, but it should not" @@ -6312,40 +6569,41 @@ msgstr "" "됩니다. 프로덕션 환경용 인증서를 올바르게 생성하는 문제에 대해서는 다른 출처를 참조하세요. 중요하지 않은 프로토타이핑 또는 연구 " "프로젝트의 경우, 이 가이드에 언급된 스크립트를 사용하여 생성한 자체 서명 인증서를 사용하는 것으로 충분할 수 있습니다." -#: ../../source/how-to-enable-ssl-connections.rst:39 +#: ../../source/how-to-enable-ssl-connections.rst:40 msgid "Server (SuperLink)" msgstr "서버(SuperLink)" -#: ../../source/how-to-enable-ssl-connections.rst:41 +#: ../../source/how-to-enable-ssl-connections.rst:42 msgid "" "Use the following terminal command to start a sever (SuperLink) that uses" " the previously generated certificates:" msgstr "다음 터미널 명령을 사용하여 이전에 생성한 인증서를 사용하는 서버(SuperLink)를 시작합니다:" -#: ../../source/how-to-enable-ssl-connections.rst:50 +#: ../../source/how-to-enable-ssl-connections.rst:52 msgid "" "When providing certificates, the server expects a tuple of three " "certificates paths: CA certificate, server certificate and server private" " key." msgstr "인증서를 제공할 때 서버는 세 가지 인증서 경로의 튜플을 기대합니다: CA 인증서, 서버 인증서 및 서버 개인 키입니다." 
-#: ../../source/how-to-enable-ssl-connections.rst:54 +#: ../../source/how-to-enable-ssl-connections.rst:56 msgid "Client (SuperNode)" msgstr "클라이언트(SuperNode)" -#: ../../source/how-to-enable-ssl-connections.rst:56 +#: ../../source/how-to-enable-ssl-connections.rst:58 msgid "" "Use the following terminal command to start a client (SuperNode) that " "uses the previously generated certificates:" msgstr "다음 터미널 명령을 사용하여 이전에 생성한 인증서를 사용하는 클라이언트(SuperNode)를 시작합니다:" -#: ../../source/how-to-enable-ssl-connections.rst:64 +#: ../../source/how-to-enable-ssl-connections.rst:67 +#, fuzzy msgid "" -"When setting :code:`root_certificates`, the client expects a file path to" -" PEM-encoded root certificates." +"When setting ``root_certificates``, the client expects a file path to " +"PEM-encoded root certificates." msgstr "코드:`root_certificates`를 설정하면 클라이언트는 PEM 인코딩된 루트 인증서의 파일 경로를 예상합니다." -#: ../../source/how-to-enable-ssl-connections.rst:70 +#: ../../source/how-to-enable-ssl-connections.rst:73 msgid "" "You should now have learned how to generate self-signed certificates " "using the given script, start an SSL-enabled server and have a client " @@ -6354,21 +6612,21 @@ msgstr "" "이제 주어진 스크립트를 사용하여 자체 서명 인증서를 생성하고, SSL 사용 서버를 시작하고, 클라이언트가 보안 연결을 설정하는 " "방법을 배웠을 것입니다." 
-#: ../../source/how-to-enable-ssl-connections.rst:75 +#: ../../source/how-to-enable-ssl-connections.rst:78 msgid "Additional resources" msgstr "추가 리소스" -#: ../../source/how-to-enable-ssl-connections.rst:77 +#: ../../source/how-to-enable-ssl-connections.rst:80 msgid "" "These additional sources might be relevant if you would like to dive " "deeper into the topic of certificates:" msgstr "인증서에 대해 더 자세히 알아보고 싶다면 이러한 추가 자료를 참고하세요:" -#: ../../source/how-to-enable-ssl-connections.rst:79 +#: ../../source/how-to-enable-ssl-connections.rst:83 msgid "`Let's Encrypt `_" msgstr "'암호화하세요 `_'" -#: ../../source/how-to-enable-ssl-connections.rst:80 +#: ../../source/how-to-enable-ssl-connections.rst:84 msgid "`certbot `_" msgstr "`인증봇 `_" @@ -6390,13 +6648,15 @@ msgstr "" "결정합니다. Flower는 아래에 설명된 것과 동일한 API를 기반으로 하는 몇 가지 기본 제공 전략을 제공합니다." #: ../../source/how-to-implement-strategies.rst:11 -msgid "The :code:`Strategy` abstraction" +#, fuzzy +msgid "The ``Strategy`` abstraction" msgstr ":code:`Strategy` 추상화" #: ../../source/how-to-implement-strategies.rst:13 +#, fuzzy msgid "" "All strategy implementation are derived from the abstract base class " -":code:`flwr.server.strategy.Strategy`, both built-in implementations and " +"``flwr.server.strategy.Strategy``, both built-in implementations and " "third party implementations. This means that custom strategy " "implementations have the exact same capabilities at their disposal as " "built-in ones." 
@@ -6411,55 +6671,60 @@ msgid "" "implemented:" msgstr "전략 추상화에서는 구현해야 하는 몇 가지 추상적인 메서드를 정의합니다:" -#: ../../source/how-to-implement-strategies.rst:75 +#: ../../source/how-to-implement-strategies.rst:67 +#, fuzzy msgid "" -"Creating a new strategy means implementing a new :code:`class` (derived " -"from the abstract base class :code:`Strategy`) that implements for the " -"previously shown abstract methods:" +"Creating a new strategy means implementing a new ``class`` (derived from " +"the abstract base class ``Strategy``) that implements for the previously " +"shown abstract methods:" msgstr "" "새 전략을 생성한다는 것은 이전에 표시된 추상 메서드에 대해 구현하는 새로운 :code:`class`(추상 기본 클래스 " ":code:`Strategy`에서 파생됨)를 구현하는 것을 의미합니다:" -#: ../../source/how-to-implement-strategies.rst:100 +#: ../../source/how-to-implement-strategies.rst:97 msgid "The Flower server calls these methods in the following order:" msgstr "Flower 서버는 다음 순서로 이러한 메서드를 호출합니다:" -#: ../../source/how-to-implement-strategies.rst:177 +#: ../../source/how-to-implement-strategies.rst:174 msgid "The following sections describe each of those methods in more detail." msgstr "다음 섹션에서는 이러한 각 방법에 대해 자세히 설명합니다." -#: ../../source/how-to-implement-strategies.rst:180 -msgid "The :code:`initialize_parameters` method" +#: ../../source/how-to-implement-strategies.rst:177 +#, fuzzy +msgid "The ``initialize_parameters`` method" msgstr ":code:`initialize_parameters` 메서드" -#: ../../source/how-to-implement-strategies.rst:182 +#: ../../source/how-to-implement-strategies.rst:179 +#, fuzzy msgid "" -":code:`initialize_parameters` is called only once, at the very beginning " -"of an execution. It is responsible for providing the initial global model" -" parameters in a serialized form (i.e., as a :code:`Parameters` object)." +"``initialize_parameters`` is called only once, at the very beginning of " +"an execution. It is responsible for providing the initial global model " +"parameters in a serialized form (i.e., as a ``Parameters`` object)." 
msgstr "" "code:`initialize_parameters`는 실행을 처음 시작할 때 한 번만 호출됩니다. 이 함수는 초기 전역 모델 " "파라미터를 직렬화된 형식(즉, :code:`Parameters` 객체)으로 제공하는 역할을 합니다." -#: ../../source/how-to-implement-strategies.rst:184 +#: ../../source/how-to-implement-strategies.rst:183 +#, fuzzy msgid "" "Built-in strategies return user-provided initial parameters. The " "following example shows how initial parameters can be passed to " -":code:`FedAvg`:" +"``FedAvg``:" msgstr "" "기본 제공 전략은 사용자가 제공한 초기 매개 변수를 반환합니다. 다음 예는 초기 매개 변수를 :code:`FedAvg`에 전달하는 " "방법을 보여줍니다:" #: ../../source/how-to-implement-strategies.rst:209 +#, fuzzy msgid "" -"The Flower server will call :code:`initialize_parameters`, which either " -"returns the parameters that were passed to :code:`initial_parameters`, or" -" :code:`None`. If no parameters are returned from " -":code:`initialize_parameters` (i.e., :code:`None`), the server will " -"randomly select one client and ask it to provide its parameters. This is " -"a convenience feature and not recommended in practice, but it can be " -"useful for prototyping. In practice, it is recommended to always use " -"server-side parameter initialization." +"The Flower server will call ``initialize_parameters``, which either " +"returns the parameters that were passed to ``initial_parameters``, or " +"``None``. If no parameters are returned from ``initialize_parameters`` " +"(i.e., ``None``), the server will randomly select one client and ask it " +"to provide its parameters. This is a convenience feature and not " +"recommended in practice, but it can be useful for prototyping. In " +"practice, it is recommended to always use server-side parameter " +"initialization." msgstr "" "Flower 서버는 :code:`initialize_parameters`를 호출하여 " ":code:`initial_parameters`에 전달된 파라미터를 반환하거나 :code:`None`을 반환합니다. " @@ -6467,7 +6732,7 @@ msgstr "" "클라이언트 하나를 선택하여 해당 클라이언트에 매개변수를 제공하도록 요청합니다. 이는 편의 기능이며 실제로는 권장하지 않지만 " "프로토타이핑에는 유용할 수 있습니다. 실제로는 항상 서버 측 매개변수 초기화를 사용하는 것이 좋습니다." 
-#: ../../source/how-to-implement-strategies.rst:213 +#: ../../source/how-to-implement-strategies.rst:218 msgid "" "Server-side parameter initialization is a powerful mechanism. It can be " "used, for example, to resume training from a previously saved checkpoint." @@ -6478,220 +6743,240 @@ msgstr "" "서버 측 파라미터 초기화는 강력한 메커니즘입니다. 예를 들어 이전에 저장한 체크포인트에서 학습을 재개하는 데 사용할 수 있습니다. " "또한 연합 학습을 사용하여 사전 학습된 모델을 미세 조정하는 등 하이브리드 접근 방식을 구현하는 데 필요한 기본 기능입니다." -#: ../../source/how-to-implement-strategies.rst:216 -msgid "The :code:`configure_fit` method" +#: ../../source/how-to-implement-strategies.rst:224 +#, fuzzy +msgid "The ``configure_fit`` method" msgstr ":code:`configure_fit` 메서드" -#: ../../source/how-to-implement-strategies.rst:218 +#: ../../source/how-to-implement-strategies.rst:226 +#, fuzzy msgid "" -":code:`configure_fit` is responsible for configuring the upcoming round " -"of training. What does *configure* mean in this context? Configuring a " -"round means selecting clients and deciding what instructions to send to " -"these clients. The signature of :code:`configure_fit` makes this clear:" +"``configure_fit`` is responsible for configuring the upcoming round of " +"training. What does *configure* mean in this context? Configuring a round" +" means selecting clients and deciding what instructions to send to these " +"clients. The signature of ``configure_fit`` makes this clear:" msgstr "" ":code:`configure_fit`은 다가오는 학 라운드를 구성하는 역할을 합니다. 이 문맥에서 *구성*은 무엇을 의미하나요? " "라운드를 구성한다는 것은 클라이언트를 선택하고 이 클라이언트에게 어떤 지침을 보낼지 결정하는 것을 의미합니다. " "code:`configure_fit`의 시그니처를 보면 이를 명확히 알 수 있습니다:" -#: ../../source/how-to-implement-strategies.rst:231 +#: ../../source/how-to-implement-strategies.rst:239 +#, fuzzy msgid "" "The return value is a list of tuples, each representing the instructions " "that will be sent to a particular client. 
Strategy implementations " -"usually perform the following steps in :code:`configure_fit`:" +"usually perform the following steps in ``configure_fit``:" msgstr "" "반환 값은 튜플 목록으로, 각 튜플은 특정 클라이언트로 전송될 명령어를 나타냅니다. 전략 구현은 일반적으로 " ":code:`configure_fit`에서 다음 단계를 수행합니다:" -#: ../../source/how-to-implement-strategies.rst:233 -#: ../../source/how-to-implement-strategies.rst:280 +#: ../../source/how-to-implement-strategies.rst:243 +#: ../../source/how-to-implement-strategies.rst:307 +#, fuzzy msgid "" -"Use the :code:`client_manager` to randomly sample all (or a subset of) " -"available clients (each represented as a :code:`ClientProxy` object)" +"Use the ``client_manager`` to randomly sample all (or a subset of) " +"available clients (each represented as a ``ClientProxy`` object)" msgstr "" ":code:`client_manager`를 사용하여 사용 가능한 모든 클라이언트(또는 그 하위 집합)를 무작위로 샘플링합니다(각각 " ":code:`ClientProxy` 개체로 표시됨)" -#: ../../source/how-to-implement-strategies.rst:234 +#: ../../source/how-to-implement-strategies.rst:245 +#, fuzzy msgid "" -"Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " -"current global model :code:`parameters` and :code:`config` dict" +"Pair each ``ClientProxy`` with the same ``FitIns`` holding the current " +"global model ``parameters`` and ``config`` dict" msgstr "" "각 :code:`ClientProxy`를 현재 글로벌 모델 :code:`parameters` 및 :code:`config` " "dict를 보유한 동일한 :code:`FitIns`와 쌍을 이룹니다" -#: ../../source/how-to-implement-strategies.rst:236 +#: ../../source/how-to-implement-strategies.rst:248 +#, fuzzy msgid "" -"More sophisticated implementations can use :code:`configure_fit` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_fit`." +"More sophisticated implementations can use ``configure_fit`` to implement" +" custom client selection logic. 
A client will only participate in a round" +" if the corresponding ``ClientProxy`` is included in the list returned " +"from ``configure_fit``." msgstr "" "보다 정교한 구현은 :code:`configure_fit`을 사용하여 사용자 지정 클라이언트 선택 로직을 구현할 수 있습니다. " "클라이언트는 :code:`configure_fit`에서 반환된 목록에 해당 :code:`ClientProxy`가 포함된 경우에만 " "라운드에 참여합니다." -#: ../../source/how-to-implement-strategies.rst:240 +#: ../../source/how-to-implement-strategies.rst:254 +#, fuzzy msgid "" "The structure of this return value provides a lot of flexibility to the " "user. Since instructions are defined on a per-client basis, different " "instructions can be sent to each client. This enables custom strategies " "to train, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." +"different hyperparameters on different clients (via the ``config`` dict)." msgstr "" "이 반환 값의 구조는 사용자에게 많은 유연성을 제공합니다. instructions은 클라이언트별로 정의되므로 각 클라이언트에 서로 " "다른 명령어를 전송할 수 있습니다. 이를 통해 예를 들어 클라이언트마다 다른 모델을 학습시키거나 클라이언트마다 다른 하이퍼파라미터를" " 사용하는 사용자 지정 전략을 사용할 수 있습니다(:code:`config` dict를 통해)." -#: ../../source/how-to-implement-strategies.rst:243 -msgid "The :code:`aggregate_fit` method" +#: ../../source/how-to-implement-strategies.rst:261 +#, fuzzy +msgid "The ``aggregate_fit`` method" msgstr ":code:`aggregate_fit` 메서드" -#: ../../source/how-to-implement-strategies.rst:245 +#: ../../source/how-to-implement-strategies.rst:263 +#, fuzzy msgid "" -":code:`aggregate_fit` is responsible for aggregating the results returned" -" by the clients that were selected and asked to train in " -":code:`configure_fit`." +"``aggregate_fit`` is responsible for aggregating the results returned by " +"the clients that were selected and asked to train in ``configure_fit``." msgstr "" "code:`aggregate_fit`은 :code:`configure_fit`에서 훈련하도록 선택되고 요청된 클라이언트가 반환한 " "결과를 집계하는 역할을 담당합니다." 
-#: ../../source/how-to-implement-strategies.rst:258 +#: ../../source/how-to-implement-strategies.rst:277 +#, fuzzy msgid "" "Of course, failures can happen, so there is no guarantee that the server " "will get results from all the clients it sent instructions to (via " -":code:`configure_fit`). :code:`aggregate_fit` therefore receives a list " -"of :code:`results`, but also a list of :code:`failures`." +"``configure_fit``). ``aggregate_fit`` therefore receives a list of " +"``results``, but also a list of ``failures``." msgstr "" "물론 실패가 발생할 수 있으므로 서버가 명령을 보낸 모든 클라이언트로부터 결과를 얻을 수 있다는 보장은 " "없습니다(:code:`configure_fit`을 통해). 따라서 :code:`aggregate_fit`은 " ":code:`results` 목록뿐만 아니라 :code:`failures` 목록도 받습니다." -#: ../../source/how-to-implement-strategies.rst:260 +#: ../../source/how-to-implement-strategies.rst:282 +#, fuzzy msgid "" -":code:`aggregate_fit` returns an optional :code:`Parameters` object and a" -" dictionary of aggregated metrics. The :code:`Parameters` return value is" -" optional because :code:`aggregate_fit` might decide that the results " -"provided are not sufficient for aggregation (e.g., too many failures)." +"``aggregate_fit`` returns an optional ``Parameters`` object and a " +"dictionary of aggregated metrics. The ``Parameters`` return value is " +"optional because ``aggregate_fit`` might decide that the results provided" +" are not sufficient for aggregation (e.g., too many failures)." msgstr "" "code:`aggregate_fit`은 선택적 :code:`Parameters` 개체와 집계된 메트릭의 dictionary를 " "반환합니다. :code:`Parameters` 반환 값은 :code:`aggregate_fit`이 제공된 결과가 집계에 충분하지 " "않다고 판단할 수 있으므로(예: 실패 수가 너무 많음) 선택 사항입니다." 
-#: ../../source/how-to-implement-strategies.rst:263 -msgid "The :code:`configure_evaluate` method" +#: ../../source/how-to-implement-strategies.rst:288 +#, fuzzy +msgid "The ``configure_evaluate`` method" msgstr ":code:`configure_evaluate` 메서드" -#: ../../source/how-to-implement-strategies.rst:265 +#: ../../source/how-to-implement-strategies.rst:290 +#, fuzzy msgid "" -":code:`configure_evaluate` is responsible for configuring the upcoming " -"round of evaluation. What does *configure* mean in this context? " -"Configuring a round means selecting clients and deciding what " -"instructions to send to these clients. The signature of " -":code:`configure_evaluate` makes this clear:" +"``configure_evaluate`` is responsible for configuring the upcoming round " +"of evaluation. What does *configure* mean in this context? Configuring a " +"round means selecting clients and deciding what instructions to send to " +"these clients. The signature of ``configure_evaluate`` makes this clear:" msgstr "" ":code:`configure_evaluate`는 다가오는 평가 라운드를 구성하는 역할을 합니다. 이 문맥에서 *구성*은 무엇을 " "의미하나요? 라운드를 구성한다는 것은 클라이언트를 선택하고 이러한 클라이언트에 전송할 지침을 결정하는 것을 의미합니다. " ":code:`configure_evaluate`의 시그니처를 보면 이를 명확히 알 수 있습니다:" -#: ../../source/how-to-implement-strategies.rst:278 +#: ../../source/how-to-implement-strategies.rst:303 +#, fuzzy msgid "" "The return value is a list of tuples, each representing the instructions " "that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_evaluate`:" +"usually perform the following steps in ``configure_evaluate``:" msgstr "" "반환 값은 튜플 목록으로, 각 튜플은 특정 클라이언트로 전송될 명령어를 나타냅니다. 
전략 구현은 일반적으로 " ":code:`configure_evaluate`에서 다음 단계를 수행합니다:" -#: ../../source/how-to-implement-strategies.rst:281 +#: ../../source/how-to-implement-strategies.rst:309 +#, fuzzy msgid "" -"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding " -"the current global model :code:`parameters` and :code:`config` dict" +"Pair each ``ClientProxy`` with the same ``EvaluateIns`` holding the " +"current global model ``parameters`` and ``config`` dict" msgstr "" "각 :code:`ClientProxy`를 현재 글로벌 모델 :code:`parameters` 및 :code:`config` " "dict를 보유한 동일한 :code:`EvaluateIns`와 쌍을 이룹니다" -#: ../../source/how-to-implement-strategies.rst:283 +#: ../../source/how-to-implement-strategies.rst:312 +#, fuzzy msgid "" -"More sophisticated implementations can use :code:`configure_evaluate` to " +"More sophisticated implementations can use ``configure_evaluate`` to " "implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_evaluate`." +"in a round if the corresponding ``ClientProxy`` is included in the list " +"returned from ``configure_evaluate``." msgstr "" "보다 정교한 구현은 :code:`configure_evaluate`를 사용하여 사용자 지정 클라이언트 선택 로직을 구현할 수 " "있습니다. 클라이언트는 :code:`configure_evaluate`에서 반환된 목록에 해당 :code:`ClientProxy`가" " 포함된 경우에만 라운드에 참여합니다." -#: ../../source/how-to-implement-strategies.rst:287 +#: ../../source/how-to-implement-strategies.rst:318 +#, fuzzy msgid "" "The structure of this return value provides a lot of flexibility to the " "user. Since instructions are defined on a per-client basis, different " "instructions can be sent to each client. This enables custom strategies " "to evaluate, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." +"different hyperparameters on different clients (via the ``config`` dict)." msgstr "" "이 반환 값의 구조는 사용자에게 많은 유연성을 제공합니다. 
명령어는 클라이언트별로 정의되므로 각 클라이언트에 서로 다른 명령어를 " "전송할 수 있습니다. 이를 통해 사용자 지정 전략을 통해 예를 들어 클라이언트마다 다른 모델을 평가하거나 클라이언트마다 다른 " "하이퍼파라미터를 사용할 수 있습니다(:code:`config` dict를 통해)." -#: ../../source/how-to-implement-strategies.rst:291 -msgid "The :code:`aggregate_evaluate` method" +#: ../../source/how-to-implement-strategies.rst:325 +#, fuzzy +msgid "The ``aggregate_evaluate`` method" msgstr ":code:`aggregate_evaluate` 메서드" -#: ../../source/how-to-implement-strategies.rst:293 +#: ../../source/how-to-implement-strategies.rst:327 +#, fuzzy msgid "" -":code:`aggregate_evaluate` is responsible for aggregating the results " +"``aggregate_evaluate`` is responsible for aggregating the results " "returned by the clients that were selected and asked to evaluate in " -":code:`configure_evaluate`." +"``configure_evaluate``." msgstr "" "code:`aggregate_evaluate`는 :code:`configure_evaluate`에서 선택되어 평가를 요청한 " "클라이언트가 반환한 결과를 집계하는 역할을 담당합니다." -#: ../../source/how-to-implement-strategies.rst:306 +#: ../../source/how-to-implement-strategies.rst:341 +#, fuzzy msgid "" "Of course, failures can happen, so there is no guarantee that the server " "will get results from all the clients it sent instructions to (via " -":code:`configure_evaluate`). :code:`aggregate_evaluate` therefore " -"receives a list of :code:`results`, but also a list of :code:`failures`." +"``configure_evaluate``). ``aggregate_evaluate`` therefore receives a list" +" of ``results``, but also a list of ``failures``." msgstr "" "물론 실패가 발생할 수 있으므로 서버가 명령을 보낸 모든 클라이언트로부터 결과를 얻을 수 있다는 보장은 " "없습니다(:code:`configure_evaluate`를 통해). 따라서 :code:`aggregate_evaluate`는 " ":code:`results` 목록뿐만 아니라 :code:`failures` 목록도 받습니다." -#: ../../source/how-to-implement-strategies.rst:308 +#: ../../source/how-to-implement-strategies.rst:346 +#, fuzzy msgid "" -":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a" -" dictionary of aggregated metrics. 
The :code:`float` return value is " -"optional because :code:`aggregate_evaluate` might decide that the results" -" provided are not sufficient for aggregation (e.g., too many failures)." +"``aggregate_evaluate`` returns an optional ``float`` (loss) and a " +"dictionary of aggregated metrics. The ``float`` return value is optional " +"because ``aggregate_evaluate`` might decide that the results provided are" +" not sufficient for aggregation (e.g., too many failures)." msgstr "" "code:`aggregate_evaluate`는 선택적 :code:`float`(손실)와 집계된 메트릭의 dictionary를 " "반환합니다. code:`float` 반환 값은 :code:`aggregate_evaluate`가 제공된 결과가 집계에 충분하지 " "않다고 판단할 수 있으므로(예: 실패 수가 너무 많음) 선택 사항입니다." -#: ../../source/how-to-implement-strategies.rst:311 -msgid "The :code:`evaluate` method" +#: ../../source/how-to-implement-strategies.rst:352 +#, fuzzy +msgid "The ``evaluate`` method" msgstr ":code:`evaluate` 메서드" -#: ../../source/how-to-implement-strategies.rst:313 +#: ../../source/how-to-implement-strategies.rst:354 +#, fuzzy msgid "" -":code:`evaluate` is responsible for evaluating model parameters on the " -"server-side. Having :code:`evaluate` in addition to " -":code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies " -"to perform both servers-side and client-side (federated) evaluation." +"``evaluate`` is responsible for evaluating model parameters on the " +"server-side. Having ``evaluate`` in addition to " +"``configure_evaluate``/``aggregate_evaluate`` enables strategies to " +"perform both servers-side and client-side (federated) evaluation." msgstr "" ":code:`evaluate`는 서버 측에서 모델 매개변수를 평가하는 역할을 담당합니다. " "code:`configure_evaluate`/:code:`aggregate_evaluate`와 함께 " ":code:`evaluate`를 사용하면 서버 측과 클라이언트 측(federated) 평가를 모두 수행할 수 있는 전략을 사용할 수" " 있습니다." 
-#: ../../source/how-to-implement-strategies.rst:323 +#: ../../source/how-to-implement-strategies.rst:364 +#, fuzzy msgid "" "The return value is again optional because the strategy might not need to" " implement server-side evaluation or because the user-defined " -":code:`evaluate` method might not complete successfully (e.g., it might " -"fail to load the server-side evaluation data)." +"``evaluate`` method might not complete successfully (e.g., it might fail " +"to load the server-side evaluation data)." msgstr "" "반환 값은 전략에서 서버 측 평가를 구현할 필요가 없거나 사용자 정의 :code:`evaluate` 메서드가 성공적으로 완료되지 " "않을 수 있기 때문에(예: 서버 측 평가 데이터를 로드하지 못할 수 있음) 다시 선택 사항으로 설정할 수 있습니다." @@ -6700,60 +6985,63 @@ msgstr "" msgid "Install Flower" msgstr "Flower 설치" -#: ../../source/how-to-install-flower.rst:6 +#: ../../source/how-to-install-flower.rst:5 msgid "Python version" msgstr "Python 버전" -#: ../../source/how-to-install-flower.rst:12 +#: ../../source/how-to-install-flower.rst:11 msgid "Install stable release" msgstr "안정적인 릴리즈 설치" -#: ../../source/how-to-install-flower.rst:15 -#: ../../source/how-to-upgrade-to-flower-next.rst:46 +#: ../../source/how-to-install-flower.rst:14 +#: ../../source/how-to-upgrade-to-flower-next.rst:66 msgid "Using pip" msgstr "pip 사용" -#: ../../source/how-to-install-flower.rst:17 -msgid "" -"Stable releases are available on `PyPI " -"`_::" +#: ../../source/how-to-install-flower.rst:16 +#, fuzzy +msgid "Stable releases are available on `PyPI `_:" msgstr "안정적인 릴리즈는 `PyPI `_:: 에서 확인할 수 있습니다::" -#: ../../source/how-to-install-flower.rst:21 +#: ../../source/how-to-install-flower.rst:22 +#, fuzzy msgid "" "For simulations that use the Virtual Client Engine, ``flwr`` should be " -"installed with the ``simulation`` extra::" +"installed with the ``simulation`` extra:" msgstr "가상 클라이언트 엔진을 사용하는 시뮬레이션의 경우 ``flwr``을 ``simulation``extra와 함께 설치해야 합니다:" -#: ../../source/how-to-install-flower.rst:27 +#: ../../source/how-to-install-flower.rst:30 msgid "Using conda (or mamba)" msgstr 
"conda(또는 mamba) 사용" -#: ../../source/how-to-install-flower.rst:29 +#: ../../source/how-to-install-flower.rst:32 msgid "Flower can also be installed from the ``conda-forge`` channel." msgstr "Flower은 'conda-forge' 채널에서도 설치할 수 있습니다." -#: ../../source/how-to-install-flower.rst:31 +#: ../../source/how-to-install-flower.rst:34 +#, fuzzy msgid "" "If you have not added ``conda-forge`` to your channels, you will first " -"need to run the following::" +"need to run the following:" msgstr "채널에 'conda-forge'를 추가하지 않은 경우 먼저 다음을 실행해야 합니다:" -#: ../../source/how-to-install-flower.rst:36 +#: ../../source/how-to-install-flower.rst:42 +#, fuzzy msgid "" "Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " -"installed with ``conda``::" +"installed with ``conda``:" msgstr "conda-forge`` 채널이 활성화되면 ``flwr``을 ``conda``로 설치할 수 있습니다::" -#: ../../source/how-to-install-flower.rst:40 -msgid "or with ``mamba``::" +#: ../../source/how-to-install-flower.rst:49 +#, fuzzy +msgid "or with ``mamba``:" msgstr "또는 ``mamba``::" -#: ../../source/how-to-install-flower.rst:46 +#: ../../source/how-to-install-flower.rst:56 msgid "Verify installation" msgstr "설치 확인" -#: ../../source/how-to-install-flower.rst:48 +#: ../../source/how-to-install-flower.rst:58 #, fuzzy msgid "" "The following command can be used to verify if Flower was successfully " @@ -6763,54 +7051,58 @@ msgstr "" "다음 명령을 사용하여 Flower가 성공적으로 설치되었는지 확인할 수 있습니다. 
모든 것이 정상적으로 작동하면 명령줄에 " "Flower의 버전이 출력됩니다:" -#: ../../source/how-to-install-flower.rst:58 +#: ../../source/how-to-install-flower.rst:68 msgid "Advanced installation options" msgstr "고급 설치 옵션" -#: ../../source/how-to-install-flower.rst:61 +#: ../../source/how-to-install-flower.rst:71 msgid "Install via Docker" msgstr "Docker를 통해 설치" -#: ../../source/how-to-install-flower.rst:63 +#: ../../source/how-to-install-flower.rst:73 #, fuzzy msgid ":doc:`Run Flower using Docker `" msgstr ":doc:`Docker를 사용하여 Flower를 실행하는 방법 `" -#: ../../source/how-to-install-flower.rst:66 +#: ../../source/how-to-install-flower.rst:76 msgid "Install pre-release" msgstr "사전 릴리즈 설치" -#: ../../source/how-to-install-flower.rst:68 +#: ../../source/how-to-install-flower.rst:78 +#, fuzzy msgid "" "New (possibly unstable) versions of Flower are sometimes available as " "pre-release versions (alpha, beta, release candidate) before the stable " -"release happens::" +"release happens:" msgstr "" "새(불안정할 수 있는) 버전의 Flower는 안정 버전이 출시되기 전에 사전 릴리즈 버전(알파, 베타, 릴리즈 후보)으로 제공되는 " "경우가 있습니다:" -#: ../../source/how-to-install-flower.rst:72 +#: ../../source/how-to-install-flower.rst:85 +#, fuzzy msgid "" "For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" -" should be installed with the ``simulation`` extra::" +" should be installed with the ``simulation`` extra:" msgstr "" "가상 클라이언트 엔진을 사용하는 시뮬레이션의 경우 ``flwr`` 사전 릴리즈를 ``simulation`` extra와 함께 " "설치해야 합니다:" -#: ../../source/how-to-install-flower.rst:77 +#: ../../source/how-to-install-flower.rst:93 msgid "Install nightly release" msgstr "야간 릴리즈 설치" -#: ../../source/how-to-install-flower.rst:79 +#: ../../source/how-to-install-flower.rst:95 +#, fuzzy msgid "" "The latest (potentially unstable) changes in Flower are available as " -"nightly releases::" +"nightly releases:" msgstr "Flower의 최신 (불안정할 수 있는) 변경 사항은 다음과 같이 야간 릴리즈로 제공됩니다:" -#: ../../source/how-to-install-flower.rst:83 +#: ../../source/how-to-install-flower.rst:101 +#, fuzzy msgid "" "For 
simulations that use the Virtual Client Engine, ``flwr-nightly`` " -"should be installed with the ``simulation`` extra::" +"should be installed with the ``simulation`` extra:" msgstr "" "가상 클라이언트 엔진을 사용하는 시뮬레이션의 경우, ``flwr-nightly``를 ``simulation`` extr와 함께 " "설치해야 합니다::" @@ -6831,7 +7123,7 @@ msgstr "" "강력하며 클라이언트별 리소스 할당 방법을 결정하고 총 사용량을 제한할 수 있습니다. 리소스 소비에 대한 인사이트를 통해 더 현명한 " "결정을 내리고 실행 시간을 단축할 수 있습니다." -#: ../../source/how-to-monitor-simulation.rst:6 +#: ../../source/how-to-monitor-simulation.rst:9 msgid "" "The specific instructions assume you are using macOS and have the " "`Homebrew `_ package manager installed." @@ -6839,11 +7131,11 @@ msgstr "" "구체적인 지침은 macOS를 사용 중이고 'Homebrew `_ 패키지 관리자가 설치되어 있다고 " "가정합니다." -#: ../../source/how-to-monitor-simulation.rst:10 +#: ../../source/how-to-monitor-simulation.rst:13 msgid "Downloads" msgstr "다운로드" -#: ../../source/how-to-monitor-simulation.rst:16 +#: ../../source/how-to-monitor-simulation.rst:19 msgid "" "`Prometheus `_ is used for data collection, while" " `Grafana `_ will enable you to visualize the " @@ -6854,27 +7146,27 @@ msgstr "" "`_는 수집된 데이터를 시각화할 수 있게 해줍니다. 이 두 도구는 모두 Flower가 " "내부적으로 사용하는 `Ray `_와 잘 통합되어 있습니다." -#: ../../source/how-to-monitor-simulation.rst:18 +#: ../../source/how-to-monitor-simulation.rst:23 msgid "" "Overwrite the configuration files (depending on your device, it might be " "installed on a different path)." msgstr "구성 파일을 덮어씁니다(장치에 따라 다른 경로에 설치되어 있을 수 있음)." -#: ../../source/how-to-monitor-simulation.rst:20 +#: ../../source/how-to-monitor-simulation.rst:26 msgid "If you are on an M1 Mac, it should be:" msgstr "M1 Mac을 사용 중이라면:" -#: ../../source/how-to-monitor-simulation.rst:27 +#: ../../source/how-to-monitor-simulation.rst:33 msgid "On the previous generation Intel Mac devices, it should be:" msgstr "이전 세대 Intel Mac 장치에서는:" -#: ../../source/how-to-monitor-simulation.rst:34 +#: ../../source/how-to-monitor-simulation.rst:40 msgid "" "Open the respective configuration files and change them. 
Depending on " "your device, use one of the two following commands:" msgstr "각 구성 파일을 열고 변경합니다. 장치에 따라 다음 두 명령 중 하나를 사용합니다:" -#: ../../source/how-to-monitor-simulation.rst:44 +#: ../../source/how-to-monitor-simulation.rst:51 msgid "" "and then delete all the text in the file and paste a new Prometheus " "config you see below. You may adjust the time intervals to your " @@ -6883,7 +7175,7 @@ msgstr "" "를 입력한 다음 파일의 모든 텍스트를 삭제하고 아래에 표시된 새 Prometheus 설정을 붙여넣습니다. 요구 사항에 따라 시간 " "간격을 조정할 수 있습니다:" -#: ../../source/how-to-monitor-simulation.rst:59 +#: ../../source/how-to-monitor-simulation.rst:67 msgid "" "Now after you have edited the Prometheus configuration, do the same with " "the Grafana configuration files. Open those using one of the following " @@ -6892,55 +7184,56 @@ msgstr "" "이제 Prometheus 구성을 편집한 후 Grafana 구성 파일에 대해서도 동일한 작업을 수행합니다. 이전과 마찬가지로 다음 " "명령 중 하나를 사용하여 파일을 엽니다:" -#: ../../source/how-to-monitor-simulation.rst:69 +#: ../../source/how-to-monitor-simulation.rst:78 msgid "" "Your terminal editor should open and allow you to apply the following " "configuration as before." msgstr "터미널 편집기가 열리면 이전과 마찬가지로 다음 구성을 적용할 수 있습니다." -#: ../../source/how-to-monitor-simulation.rst:84 +#: ../../source/how-to-monitor-simulation.rst:94 msgid "" "Congratulations, you just downloaded all the necessary software needed " "for metrics tracking. Now, let’s start it." msgstr "축하합니다. 매트릭 트레킹에 필요한 모든 소프트웨어를 다운로드하셨습니다. 이제 시작해 보겠습니다." -#: ../../source/how-to-monitor-simulation.rst:88 +#: ../../source/how-to-monitor-simulation.rst:98 msgid "Tracking metrics" msgstr "매트릭 트래킹" -#: ../../source/how-to-monitor-simulation.rst:90 +#: ../../source/how-to-monitor-simulation.rst:100 msgid "" "Before running your Flower simulation, you have to start the monitoring " "tools you have just installed and configured." msgstr "Flower 시뮬레이션을 실행하기 전에 방금 설치 및 구성한 모니터링 도구를 시작해야 합니다." 
-#: ../../source/how-to-monitor-simulation.rst:97 +#: ../../source/how-to-monitor-simulation.rst:108 msgid "" "Please include the following argument in your Python code when starting a" " simulation." msgstr "시뮬레이션을 시작할 때 Python 코드에 다음 전달인자를 포함하세요." -#: ../../source/how-to-monitor-simulation.rst:108 +#: ../../source/how-to-monitor-simulation.rst:119 msgid "Now, you are ready to start your workload." msgstr "이제 워크로드를 시작할 준비가 되었습니다." -#: ../../source/how-to-monitor-simulation.rst:110 +#: ../../source/how-to-monitor-simulation.rst:121 msgid "" "Shortly after the simulation starts, you should see the following logs in" " your terminal:" msgstr "시뮬레이션이 시작되고 얼마 지나지 않아 터미널에 다음 로그가 표시됩니다:" -#: ../../source/how-to-monitor-simulation.rst:117 -msgid "You can look at everything at ``_ ." +#: ../../source/how-to-monitor-simulation.rst:127 +#, fuzzy +msgid "You can look at everything at http://127.0.0.1:8265 ." msgstr "``_ 에서 모든 것을 볼 수 있습니다." -#: ../../source/how-to-monitor-simulation.rst:119 +#: ../../source/how-to-monitor-simulation.rst:129 msgid "" "It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" " lowest option)." msgstr "Ray 대시보드입니다. 메트릭(왼쪽 패널의 가장 아래 옵션)으로 이동할 수 있습니다." -#: ../../source/how-to-monitor-simulation.rst:121 +#: ../../source/how-to-monitor-simulation.rst:132 msgid "" "Or alternatively, you can just see them in Grafana by clicking on the " "right-up corner, “View in Grafana”. Please note that the Ray dashboard is" @@ -6952,26 +7245,27 @@ msgstr "" "시뮬레이션 중에만 액세스할 수 있다는 점에 유의하세요. 시뮬레이션이 종료된 후에는 Grafana를 사용하여 메트릭을 탐색할 수만 " "있습니다. ``http://localhost:3000/``로 이동하여 Grafana를 시작할 수 있습니다." -#: ../../source/how-to-monitor-simulation.rst:123 +#: ../../source/how-to-monitor-simulation.rst:137 +#, fuzzy msgid "" "After you finish the visualization, stop Prometheus and Grafana. This is " -"important as they will otherwise block, for example port :code:`3000` on " -"your machine as long as they are running." 
+"important as they will otherwise block, for example port ``3000`` on your" +" machine as long as they are running." msgstr "" "시각화를 완료한 후에는 Prometheus와 Grafana를 중지합니다. 그렇지 않으면 실행 중인 동안 컴퓨터에서 포트 " ":code:`3000` 등을 차단하므로 이 작업이 중요합니다." -#: ../../source/how-to-monitor-simulation.rst:132 +#: ../../source/how-to-monitor-simulation.rst:147 msgid "Resource allocation" msgstr "리소스 할당" -#: ../../source/how-to-monitor-simulation.rst:134 +#: ../../source/how-to-monitor-simulation.rst:149 msgid "" "You must understand how the Ray library works to efficiently allocate " "system resources to simulation clients on your own." msgstr "Ray 라이브러리가 어떻게 작동하는지 이해해야 시뮬레이션 클라이언트에 시스템 리소스를 효율적으로 할당할 수 있습니다." -#: ../../source/how-to-monitor-simulation.rst:136 +#: ../../source/how-to-monitor-simulation.rst:152 msgid "" "Initially, the simulation (which Ray handles under the hood) starts by " "default with all the available resources on the system, which it shares " @@ -6984,21 +7278,21 @@ msgstr "" "클라이언트 간에 공유됩니다. 그렇다고 해서 모든 클라이언트에게 균등하게 분배하거나 모든 클라이언트에서 동시에 모델 학습이 이루어지는" " 것은 아닙니다. 이에 대한 자세한 내용은 이 블로그의 뒷부분에서 설명합니다. 다음을 실행하여 시스템 리소스를 확인할 수 있습니다:" -#: ../../source/how-to-monitor-simulation.rst:143 +#: ../../source/how-to-monitor-simulation.rst:164 msgid "In Google Colab, the result you see might be similar to this:" msgstr "Google Colab에서는 이와 유사한 결과가 표시될 수 있습니다:" -#: ../../source/how-to-monitor-simulation.rst:155 +#: ../../source/how-to-monitor-simulation.rst:175 msgid "" "However, you can overwrite the defaults. When starting a simulation, do " "the following (you don't need to overwrite all of them):" msgstr "그러나 기본값을 덮어쓸 수 있습니다. 시뮬레이션을 시작할 때 다음을 수행합니다(모두 덮어쓸 필요는 없음):" -#: ../../source/how-to-monitor-simulation.rst:175 +#: ../../source/how-to-monitor-simulation.rst:195 msgid "Let’s also specify the resource for a single client." msgstr "단일 클라이언트에 대한 리소스도 지정해 보겠습니다." 
-#: ../../source/how-to-monitor-simulation.rst:205 +#: ../../source/how-to-monitor-simulation.rst:225 msgid "" "Now comes the crucial part. Ray will start a new client only when it has " "all the required resources (such that they run in parallel) when the " @@ -7007,29 +7301,30 @@ msgstr "" "이제 중요한 부분이 나옵니다. Ray는 리소스가 허용하는 경우에만 필요한 모든 리소스가 있을 때(병렬로 실행되는 등) 새 " "클라이언트를 시작합니다." -#: ../../source/how-to-monitor-simulation.rst:207 +#: ../../source/how-to-monitor-simulation.rst:228 +#, fuzzy msgid "" "In the example above, only one client will be run, so your clients won't " -"run concurrently. Setting :code:`client_num_gpus = 0.5` would allow " -"running two clients and therefore enable them to run concurrently. Be " -"careful not to require more resources than available. If you specified " -":code:`client_num_gpus = 2`, the simulation wouldn't start (even if you " -"had 2 GPUs but decided to set 1 in :code:`ray_init_args`)." +"run concurrently. Setting ``client_num_gpus = 0.5`` would allow running " +"two clients and therefore enable them to run concurrently. Be careful not" +" to require more resources than available. If you specified " +"``client_num_gpus = 2``, the simulation wouldn't start (even if you had 2" +" GPUs but decided to set 1 in ``ray_init_args``)." msgstr "" "위의 예에서는 하나의 클라이언트만 실행되므로 클라이언트가 동시에 실행되지 않습니다. :code:`client_num_gpus = " "0.5` 를 설정하면 두 개의 클라이언트를 실행할 수 있으므로 동시에 실행할 수 있습니다. 사용 가능한 리소스보다 더 많은 리소스를" " 요구하지 않도록 주의하세요. :code:`client_num_gpus = 2`를 지정하면 시뮬레이션이 시작되지 않습니다(GPU가 " "2개이지만 :code:`ray_init_args`에서 1개를 설정한 경우에도 마찬가지입니다)." -#: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 +#: ../../source/how-to-monitor-simulation.rst:235 ../../source/ref-faq.rst:2 msgid "FAQ" msgstr "자주 묻는 질문" -#: ../../source/how-to-monitor-simulation.rst:214 +#: ../../source/how-to-monitor-simulation.rst:237 msgid "Q: I don't see any metrics logged." msgstr "질문: 기록된 메트릭이 보이지 않습니다." 
-#: ../../source/how-to-monitor-simulation.rst:216 +#: ../../source/how-to-monitor-simulation.rst:239 msgid "" "A: The timeframe might not be properly set. The setting is in the top " "right corner (\"Last 30 minutes\" by default). Please change the " @@ -7038,7 +7333,7 @@ msgstr "" "A: 기간이 제대로 설정되지 않았을 수 있습니다. 설정은 오른쪽 상단에 있습니다(기본값은 '지난 30분'). 시뮬레이션이 실행된 " "기간을 반영하도록 기간을 변경해 주세요." -#: ../../source/how-to-monitor-simulation.rst:218 +#: ../../source/how-to-monitor-simulation.rst:243 msgid "" "Q: I see “Grafana server not detected. Please make sure the Grafana " "server is running and refresh this page” after going to the Metrics tab " @@ -7047,38 +7342,41 @@ msgstr "" "질문: \"Grafana 서버가 감지되지 않았습니다. Ray 대시보드의 메트릭 탭으로 이동한 후 Grafana 서버가 실행 중인지 " "확인하고 이 페이지를 새로고침하세요.\"라는 메시지가 표시됩니다." -#: ../../source/how-to-monitor-simulation.rst:220 +#: ../../source/how-to-monitor-simulation.rst:246 msgid "" "A: You probably don't have Grafana running. Please check the running " "services" msgstr "A: Grafana가 실행되고 있지 않을 수 있습니다. 실행 중인 서비스를 확인하세요" -#: ../../source/how-to-monitor-simulation.rst:226 +#: ../../source/how-to-monitor-simulation.rst:252 +#, fuzzy msgid "" "Q: I see \"This site can't be reached\" when going to " -"``_." +"http://127.0.0.1:8265." msgstr "Q: ``_로 이동할 때 \"이 사이트에 연결할 수 없습니다.\"라는 메시지가 표시됩니다." -#: ../../source/how-to-monitor-simulation.rst:228 +#: ../../source/how-to-monitor-simulation.rst:254 msgid "" "A: Either the simulation has already finished, or you still need to start" " Prometheus." msgstr "A: 시뮬레이션이 이미 완료되었거나 아직 Prometheus를 시작해야 합니다." 
-#: ../../source/how-to-monitor-simulation.rst:232 +#: ../../source/how-to-monitor-simulation.rst:257 msgid "Resources" msgstr "리소스" -#: ../../source/how-to-monitor-simulation.rst:234 +#: ../../source/how-to-monitor-simulation.rst:259 +#, fuzzy msgid "" -"Ray Dashboard: ``_" +"Ray Dashboard: https://docs.ray.io/en/latest/ray-observability/getting-" +"started.html" msgstr "" "Ray 대시보드: ``_" -#: ../../source/how-to-monitor-simulation.rst:236 -msgid "Ray Metrics: ``_" +#: ../../source/how-to-monitor-simulation.rst:261 +#, fuzzy +msgid "Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html" msgstr "Ray 메트릭: ``_" #: ../../source/how-to-run-simulations.rst:2 @@ -7108,16 +7406,17 @@ msgstr "" "architecture.html#virtual-client-engine>`_ 또는 VCE를 통해 이러한 시나리오를 수용할 수 " "있습니다." -#: ../../source/how-to-run-simulations.rst:10 +#: ../../source/how-to-run-simulations.rst:19 +#, fuzzy msgid "" -"The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" -" clients. These clients are identical to `non-virtual` clients (i.e. the " +"The ``VirtualClientEngine`` schedules, launches and manages `virtual` " +"clients. These clients are identical to `non-virtual` clients (i.e. the " "ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " "creating a class inheriting, for example, from `flwr.client.NumPyClient " "`_ and therefore behave in an " "identical way. In addition to that, clients managed by the " -":code:`VirtualClientEngine` are:" +"``VirtualClientEngine`` are:" msgstr "" ":code:`VirtualClientEngine`은 `virtual` 클라이언트를 예약, 실행 및 관리합니다. 이러한 클라이언트는 " "`non-virtual` 클라이언트(예: `flwr.client.start_client `_에서 상속하는 클래스 생성으로 구성될 수 있으므로 동일한 " "방식으로 동작합니다. 
그 외에도 :code:`VirtualClientEngine`에 의해 관리되는 클라이언트는 다음과 같습니다:" -#: ../../source/how-to-run-simulations.rst:12 +#: ../../source/how-to-run-simulations.rst:26 msgid "" "resource-aware: this means that each client gets assigned a portion of " "the compute and memory on your system. You as a user can control this at " @@ -7137,16 +7436,17 @@ msgstr "" "시뮬레이션을 시작할 때 이를 제어할 수 있으며, 이를 통해 Flower FL 시뮬레이션의 병렬 처리 정도를 제어할 수 있습니다. " "클라이언트당 리소스가 적을수록 동일한 하드웨어에서 더 많은 클라이언트를 동시에 실행할 수 있습니다." -#: ../../source/how-to-run-simulations.rst:13 +#: ../../source/how-to-run-simulations.rst:31 +#, fuzzy msgid "" "self-managed: this means that you as a user do not need to launch clients" -" manually, instead this gets delegated to :code:`VirtualClientEngine`'s " +" manually, instead this gets delegated to ``VirtualClientEngine``'s " "internals." msgstr "" "self-managed: 이는 사용자가 클라이언트를 수동으로 실행할 필요가 없으며, 대신 " ":code:`VirtualClientEngine`의 내부에 위임된다는 의미입니다." -#: ../../source/how-to-run-simulations.rst:14 +#: ../../source/how-to-run-simulations.rst:33 msgid "" "ephemeral: this means that a client is only materialized when it is " "required in the FL process (e.g. to do `fit() `_을 수행하기 위해). 객체는 나중에 소멸되어 할당된 리소스를 해제하고" " 다른 클라이언트가 참여할 수 있도록 허용합니다." -#: ../../source/how-to-run-simulations.rst:16 +#: ../../source/how-to-run-simulations.rst:38 +#, fuzzy msgid "" -"The :code:`VirtualClientEngine` implements `virtual` clients using `Ray " +"The ``VirtualClientEngine`` implements `virtual` clients using `Ray " "`_, an open-source framework for scalable Python " -"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use " -"of `Actors `_ to " -"spawn `virtual` clients and run their workload." +"workloads. In particular, Flower's ``VirtualClientEngine`` makes use of " +"`Actors `_ to spawn " +"`virtual` clients and run their workload." msgstr "" ":code:`VirtualClientEngine`은 확장 가능한 파이썬 워크로드를 위한 오픈 소스 프레임워크인 `Ray " "`_를 사용하여 `virtual` 클라이언트를 구현합니다. 
특히 Flower의 " ":code:`VirtualClientEngine`은 `Actors `_를 사용하여 `virtual` 클라이언트를 생성하고 해당 워크로드를 실행합니다." -#: ../../source/how-to-run-simulations.rst:20 +#: ../../source/how-to-run-simulations.rst:45 msgid "Launch your Flower simulation" msgstr "Flower 시뮬레이션 시작" -#: ../../source/how-to-run-simulations.rst:22 +#: ../../source/how-to-run-simulations.rst:47 msgid "" "Running Flower simulations still require you to define your client class," " a strategy, and utility functions to download and load (and potentially " @@ -7189,22 +7490,23 @@ msgstr "" "flwr.html#flwr.simulation.start_simulation>`_을 사용하면 되며, 최소한의 예시는 다음과 " "같습니다:" -#: ../../source/how-to-run-simulations.rst:44 +#: ../../source/how-to-run-simulations.rst:73 msgid "VirtualClientEngine resources" msgstr "VirtualClientEngine 리소스" -#: ../../source/how-to-run-simulations.rst:45 +#: ../../source/how-to-run-simulations.rst:75 +#, fuzzy msgid "" "By default the VCE has access to all system resources (i.e. all CPUs, all" " GPUs, etc) since that is also the default behavior when starting Ray. " "However, in some settings you might want to limit how many of your system" " resources are used for simulation. You can do this via the " -":code:`ray_init_args` input argument to :code:`start_simulation` which " -"the VCE internally passes to Ray's :code:`ray.init` command. For a " -"complete list of settings you can configure check the `ray.init " +"``ray_init_args`` input argument to ``start_simulation`` which the VCE " +"internally passes to Ray's ``ray.init`` command. For a complete list of " +"settings you can configure check the `ray.init " "`_" -" documentation. Do not set :code:`ray_init_args` if you want the VCE to " -"use all your system's CPUs and GPUs." +" documentation. Do not set ``ray_init_args`` if you want the VCE to use " +"all your system's CPUs and GPUs." msgstr "" "기본적으로 VCE는 모든 시스템 리소스(예: 모든 CPU, 모든 GPU 등)에 액세스할 수 있으며, 이는 Ray를 시작할 때의 기본" " 동작이기도 합니다. 그러나 일부 설정에서는 시뮬레이션에 사용되는 시스템 리소스의 수를 제한하고 싶을 수 있습니다. 
이 설정은 " @@ -7214,20 +7516,21 @@ msgstr "" " 설명서를 확인하세요. VCE가 시스템의 모든 CPU와 GPU를 사용하도록 하려면 :code:`ray_init_args`를 설정하지" " 마세요." -#: ../../source/how-to-run-simulations.rst:62 +#: ../../source/how-to-run-simulations.rst:97 msgid "Assigning client resources" msgstr "클라이언트 리소스 할당" -#: ../../source/how-to-run-simulations.rst:63 +#: ../../source/how-to-run-simulations.rst:99 +#, fuzzy msgid "" -"By default the :code:`VirtualClientEngine` assigns a single CPU core (and" -" nothing else) to each virtual client. This means that if your system has" -" 10 cores, that many virtual clients can be concurrently running." +"By default the ``VirtualClientEngine`` assigns a single CPU core (and " +"nothing else) to each virtual client. This means that if your system has " +"10 cores, that many virtual clients can be concurrently running." msgstr "" "기본적으로 :code:`VirtualClientEngine`은 각 가상 클라이언트에 단일 CPU 코어를 할당합니다(그 외에는 " "아무것도 할당하지 않음). 즉, 시스템에 코어가 10개인 경우 그만큼 많은 가상 클라이언트를 동시에 실행할 수 있습니다." -#: ../../source/how-to-run-simulations.rst:65 +#: ../../source/how-to-run-simulations.rst:103 msgid "" "More often than not, you would probably like to adjust the resources your" " clients get assigned based on the complexity (i.e. compute and memory " @@ -7242,31 +7545,32 @@ msgstr "" "flwr.html#flwr.simulation.start_simulation>`_로 설정하여 이를 수행할 수 있습니다. Ray는 " "내부적으로 두 개의 키를 사용하여 워크로드(이 경우 Flower 클라이언트)를 스케줄링하고 스폰합니다:" -#: ../../source/how-to-run-simulations.rst:67 -msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." +#: ../../source/how-to-run-simulations.rst:110 +#, fuzzy +msgid "``num_cpus`` indicates the number of CPU cores a client would get." msgstr ":code:`num_cpus`는 클라이언트에서 사용할 수 있는 CPU 코어 수를 나타냅니다." -#: ../../source/how-to-run-simulations.rst:68 -msgid "" -":code:`num_gpus` indicates the **ratio** of GPU memory a client gets " -"assigned." 
+#: ../../source/how-to-run-simulations.rst:111 +#, fuzzy +msgid "``num_gpus`` indicates the **ratio** of GPU memory a client gets assigned." msgstr ":code:`num_gpus`는 클라이언트에 할당되는 GPU 메모리의 **비율**을 나타냅니다." -#: ../../source/how-to-run-simulations.rst:70 +#: ../../source/how-to-run-simulations.rst:113 msgid "Let's see a few examples:" msgstr "몇 가지 예를 살펴보겠습니다:" -#: ../../source/how-to-run-simulations.rst:89 +#: ../../source/how-to-run-simulations.rst:132 +#, fuzzy msgid "" -"While the :code:`client_resources` can be used to control the degree of " +"While the ``client_resources`` can be used to control the degree of " "concurrency in your FL simulation, this does not stop you from running " "dozens, hundreds or even thousands of clients in the same round and " "having orders of magnitude more `dormant` (i.e. not participating in a " "round) clients. Let's say you want to have 100 clients per round but your" " system can only accommodate 8 clients concurrently. The " -":code:`VirtualClientEngine` will schedule 100 jobs to run (each " -"simulating a client sampled by the strategy) and then will execute them " -"in a resource-aware manner in batches of 8." +"``VirtualClientEngine`` will schedule 100 jobs to run (each simulating a " +"client sampled by the strategy) and then will execute them in a resource-" +"aware manner in batches of 8." msgstr "" "code:`client_resources`를 사용하여 FL 시뮬레이션의 동시성 정도를 제어할 수 있지만, 동일한 라운드에서 수십, " "수백 또는 수천 개의 클라이언트를 실행하고 훨씬 더 많은 '휴면'(즉, 라운드에 참여하지 않는) 클라이언트를 보유하는 것을 막을 " @@ -7274,7 +7578,7 @@ msgstr "" "code:`VirtualClientEngine`은 실행할 100개의 작업(각각 전략에서 샘플링한 클라이언트를 시뮬레이션)을 예약한 " "다음 리소스 인식 방식으로 8개씩 일괄적으로 실행합니다." -#: ../../source/how-to-run-simulations.rst:91 +#: ../../source/how-to-run-simulations.rst:140 msgid "" "To understand all the intricate details on how resources are used to " "schedule FL clients and how to define custom resources, please take a " @@ -7285,11 +7589,11 @@ msgstr "" "이해하려면 'Ray 문서 '를 참조하세요." 
-#: ../../source/how-to-run-simulations.rst:94 +#: ../../source/how-to-run-simulations.rst:145 msgid "Simulation examples" msgstr "시뮬레이션 예제" -#: ../../source/how-to-run-simulations.rst:96 +#: ../../source/how-to-run-simulations.rst:147 msgid "" "A few ready-to-run complete examples for Flower simulation in " "Tensorflow/Keras and PyTorch are provided in the `Flower repository " @@ -7298,7 +7602,7 @@ msgstr "" "Tensorflow/Keras와 파이토치에서 바로 실행할 수 있는 몇 가지 Flower 시뮬레이션 예제는 `Flower 레포지토리 " "`_에서 제공됩니다. Google Colab에서도 실행할 수 있습니다:" -#: ../../source/how-to-run-simulations.rst:98 +#: ../../source/how-to-run-simulations.rst:151 msgid "" "`Tensorflow/Keras Simulation " "`_: 100개의 클라이언트가 공동으로 MNIST에서 MLP 모델을 훈련합니다." -#: ../../source/how-to-run-simulations.rst:99 +#: ../../source/how-to-run-simulations.rst:154 msgid "" "`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " @@ -7317,28 +7621,29 @@ msgstr "" "파이토치 시뮬레이션 `_: 100개의 클라이언트가 공동으로 MNIST에서 CNN 모델을 훈련합니다." -#: ../../source/how-to-run-simulations.rst:104 +#: ../../source/how-to-run-simulations.rst:159 msgid "Multi-node Flower simulations" msgstr "멀티 노드 Flower 시뮬레이션" -#: ../../source/how-to-run-simulations.rst:106 +#: ../../source/how-to-run-simulations.rst:161 +#, fuzzy msgid "" -"Flower's :code:`VirtualClientEngine` allows you to run FL simulations " -"across multiple compute nodes. Before starting your multi-node simulation" -" ensure that you:" +"Flower's ``VirtualClientEngine`` allows you to run FL simulations across " +"multiple compute nodes. Before starting your multi-node simulation ensure" +" that you:" msgstr "" "Flower의 :code:`VirtualClientEngine`을 사용하면 여러 컴퓨팅 노드에서 FL 시뮬레이션을 실행할 수 " "있습니다. 멀티 노드 시뮬레이션을 시작하기 전에 다음 사항을 확인하세요:" -#: ../../source/how-to-run-simulations.rst:108 +#: ../../source/how-to-run-simulations.rst:164 msgid "Have the same Python environment in all nodes." msgstr "모든 노드에서 동일한 Python 환경을 유지합니다." 
-#: ../../source/how-to-run-simulations.rst:109 +#: ../../source/how-to-run-simulations.rst:165 msgid "Have a copy of your code (e.g. your entire repo) in all nodes." msgstr "모든 노드에 코드 사본(예: 전체 레포지토리)을 보관하세요." -#: ../../source/how-to-run-simulations.rst:110 +#: ../../source/how-to-run-simulations.rst:166 msgid "" "Have a copy of your dataset in all nodes (more about this in " ":ref:`simulation considerations `)" @@ -7346,78 +7651,82 @@ msgstr "" "모든 노드에 데이터 세트의 사본을 보유하세요(자세한 내용은 :ref:`simulation considerations " "`에서 확인하세요)" -#: ../../source/how-to-run-simulations.rst:111 +#: ../../source/how-to-run-simulations.rst:168 +#, fuzzy msgid "" -"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation " -"`_ so the " -":code:`VirtualClientEngine` attaches to a running Ray instance." +"Pass ``ray_init_args={\"address\"=\"auto\"}`` to `start_simulation `_ so the " +"``VirtualClientEngine`` attaches to a running Ray instance." msgstr "" ":code:`ray_init_args={\"address\"=\"auto\"}`를 `start_simulation `_에 전달하여 " ":code:`VirtualClientEngine`이 실행 중인 Ray 인스턴스에 연결되도록 합니다." -#: ../../source/how-to-run-simulations.rst:112 +#: ../../source/how-to-run-simulations.rst:171 +#, fuzzy msgid "" -"Start Ray on you head node: on the terminal type :code:`ray start " -"--head`. This command will print a few lines, one of which indicates how " -"to attach other nodes to the head node." +"Start Ray on you head node: on the terminal type ``ray start --head``. " +"This command will print a few lines, one of which indicates how to attach" +" other nodes to the head node." msgstr "" "헤드 노드에서 Ray 시작: 터미널에서 :code:`ray start --head`를 입력합니다. 이 명령은 몇 줄을 출력하며, 그" " 중 하나는 다른 노드를 헤드 노드에 연결하는 방법을 나타냅니다." 
-#: ../../source/how-to-run-simulations.rst:113 +#: ../../source/how-to-run-simulations.rst:174 +#, fuzzy msgid "" "Attach other nodes to the head node: copy the command shown after " "starting the head and execute it on terminal of a new node: for example " -":code:`ray start --address='192.168.1.132:6379'`" +"``ray start --address='192.168.1.132:6379'``" msgstr "" "헤드 노드에 다른 노드 연결: 헤드를 시작한 후 표시된 명령어을 복사하여 새 노드의 터미널에서 실행합니다: 예: :code:`ray" " start --address='192.168.1.132:6379'`" -#: ../../source/how-to-run-simulations.rst:115 +#: ../../source/how-to-run-simulations.rst:178 msgid "" "With all the above done, you can run your code from the head node as you " "would if the simulation was running on a single node." msgstr "위의 모든 작업이 완료되면 단일 노드에서 시뮬레이션을 실행할 때와 마찬가지로 헤드 노드에서 코드를 실행할 수 있습니다." -#: ../../source/how-to-run-simulations.rst:117 +#: ../../source/how-to-run-simulations.rst:181 +#, fuzzy msgid "" "Once your simulation is finished, if you'd like to dismantle your cluster" -" you simply need to run the command :code:`ray stop` in each node's " -"terminal (including the head node)." +" you simply need to run the command ``ray stop`` in each node's terminal " +"(including the head node)." msgstr "" "시뮬레이션이 완료되면 클러스터를 해체하려면 각 노드(헤드 노드 포함)의 터미널에서 :code:`ray stop` 명령을 실행하기만 " "하면 됩니다." 
-#: ../../source/how-to-run-simulations.rst:120 +#: ../../source/how-to-run-simulations.rst:185 msgid "Multi-node simulation good-to-know" msgstr "멀티 노드 시뮬레이션에 대해 알아두면 좋은 사항" -#: ../../source/how-to-run-simulations.rst:122 +#: ../../source/how-to-run-simulations.rst:187 msgid "" "Here we list a few interesting functionality when running multi-node FL " "simulations:" msgstr "여기에서는 멀티 노드 FL 시뮬레이션을 실행할 때 흥미로운 몇 가지 기능을 나열합니다:" -#: ../../source/how-to-run-simulations.rst:124 +#: ../../source/how-to-run-simulations.rst:189 +#, fuzzy msgid "" -"User :code:`ray status` to check all nodes connected to your head node as" -" well as the total resources available to the " -":code:`VirtualClientEngine`." +"User ``ray status`` to check all nodes connected to your head node as " +"well as the total resources available to the ``VirtualClientEngine``." msgstr "" "사용자는 :code:`ray status`를 통해 헤드 노드에 연결된 모든 노드와 " ":code:`VirtualClientEngine`에 사용 가능한 총 리소스를 확인할 수 있습니다." -#: ../../source/how-to-run-simulations.rst:126 +#: ../../source/how-to-run-simulations.rst:192 +#, fuzzy msgid "" "When attaching a new node to the head, all its resources (i.e. all CPUs, " "all GPUs) will be visible by the head node. This means that the " -":code:`VirtualClientEngine` can schedule as many `virtual` clients as " -"that node can possible run. In some settings you might want to exclude " -"certain resources from the simulation. You can do this by appending " -"`--num-cpus=` and/or `--num-" -"gpus=` in any :code:`ray start` command (including " -"when starting the head)" +"``VirtualClientEngine`` can schedule as many `virtual` clients as that " +"node can possible run. In some settings you might want to exclude certain" +" resources from the simulation. You can do this by appending `--num-" +"cpus=` and/or `--num-gpus=` in " +"any ``ray start`` command (including when starting the head)" msgstr "" "새 노드를 헤드에 연결하면 해당 노드의 모든 리소스(즉, 모든 CPU, 모든 GPU)가 헤드 노드에 표시됩니다. 
즉, " ":code:`VirtualClientEngine`은 해당 노드가 실행할 수 있는 만큼의 `가상` 클라이언트를 예약할 수 있습니다. " @@ -7425,17 +7734,17 @@ msgstr "" "포함)에 `--num-cpus=` 및/또는 `--num-" "gpus=`를 추가하여 이 작업을 수행하면 됩니다" -#: ../../source/how-to-run-simulations.rst:132 +#: ../../source/how-to-run-simulations.rst:202 msgid "Considerations for simulations" msgstr "시뮬레이션 시 고려 사항" -#: ../../source/how-to-run-simulations.rst:135 +#: ../../source/how-to-run-simulations.rst:206 msgid "" "We are actively working on these fronts so to make it trivial to run any " "FL workload with Flower simulation." msgstr "Flower 시뮬레이션으로 모든 FL 워크로드를 간편하게 실행할 수 있도록 이러한 측면에서 적극적으로 노력하고 있습니다." -#: ../../source/how-to-run-simulations.rst:138 +#: ../../source/how-to-run-simulations.rst:209 msgid "" "The current VCE allows you to run Federated Learning workloads in " "simulation mode whether you are prototyping simple scenarios on your " @@ -7450,30 +7759,32 @@ msgstr "" "동안, 아래에서는 Flower로 FL 파이프라인을 설계할 때 염두에 두어야 할 몇 가지 사항을 강조합니다. 또한 현재 구현에서 몇 " "가지 제한 사항을 강조합니다." -#: ../../source/how-to-run-simulations.rst:141 +#: ../../source/how-to-run-simulations.rst:217 msgid "GPU resources" msgstr "GPU 리소스" -#: ../../source/how-to-run-simulations.rst:143 +#: ../../source/how-to-run-simulations.rst:219 +#, fuzzy msgid "" "The VCE assigns a share of GPU memory to a client that specifies the key " -":code:`num_gpus` in :code:`client_resources`. This being said, Ray (used " +"``num_gpus`` in ``client_resources``. This being said, Ray (used " "internally by the VCE) is by default:" msgstr "" "VCE는 :code:`client_resources`에서 :code:`num_gpus` 키를 지정하는 클라이언트에 GPU 메모리 " "공유를 할당합니다. 즉, (VCE에서 내부적으로 사용하는) Ray가 기본적으로 사용됩니다:" -#: ../../source/how-to-run-simulations.rst:146 +#: ../../source/how-to-run-simulations.rst:222 +#, fuzzy msgid "" "not aware of the total VRAM available on the GPUs. This means that if you" -" set :code:`num_gpus=0.5` and you have two GPUs in your system with " -"different (e.g. 
32GB and 8GB) VRAM amounts, they both would run 2 clients" -" concurrently." +" set ``num_gpus=0.5`` and you have two GPUs in your system with different" +" (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients " +"concurrently." msgstr "" "GPU에서 사용 가능한 총 VRAM을 인식하지 못합니다. 즉, 시스템에 서로 다른(예: 32GB와 8GB) VRAM 용량을 가진 두" " 개의 GPU가 있고 :code:`num_gpus=0.5`를 설정하면 둘 다 동시에 2개의 클라이언트를 실행하게 됩니다." -#: ../../source/how-to-run-simulations.rst:147 +#: ../../source/how-to-run-simulations.rst:225 msgid "" "not aware of other unrelated (i.e. not created by the VCE) workloads are " "running on the GPU. Two takeaways from this are:" @@ -7481,7 +7792,7 @@ msgstr "" "관련 없는(즉, VCE에 의해 생성되지 않은) 다른 워크로드가 GPU에서 실행되고 있는지 알지 못합니다. 여기서 두 가지 시사점을 " "얻을 수 있습니다:" -#: ../../source/how-to-run-simulations.rst:149 +#: ../../source/how-to-run-simulations.rst:228 msgid "" "Your Flower server might need a GPU to evaluate the `global model` after " "aggregation (by instance when making use of the `evaluate method `_를 사용할 때)" -#: ../../source/how-to-run-simulations.rst:150 +#: ../../source/how-to-run-simulations.rst:231 +#, fuzzy msgid "" "If you want to run several independent Flower simulations on the same " "machine you need to mask-out your GPUs with " -":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching your " -"experiment." +"``CUDA_VISIBLE_DEVICES=\"\"`` when launching your experiment." msgstr "" "동일한 머신에서 여러 개의 독립적인 Flower 시뮬레이션을 실행하려면, 실험을 시작할 때 " ":code:`CUDA_VISIBLE_DEVICES=\"\"`로 GPU를 마스킹해야 합니다." -#: ../../source/how-to-run-simulations.rst:153 +#: ../../source/how-to-run-simulations.rst:235 +#, fuzzy msgid "" -"In addition, the GPU resource limits passed to :code:`client_resources` " -"are not `enforced` (i.e. they can be exceeded) which can result in the " +"In addition, the GPU resource limits passed to ``client_resources`` are " +"not `enforced` (i.e. 
they can be exceeded) which can result in the " "situation of client using more VRAM than the ratio specified when " "starting the simulation." msgstr "" "또한 :code:`client_resources`에 전달된 GPU 리소스 제한이 '강제'되지 않아(즉, 초과할 수 있음) " "클라이언트가 시뮬레이션을 시작할 때 지정된 비율보다 더 많은 VRAM을 사용하는 상황이 발생할 수 있습니다." -#: ../../source/how-to-run-simulations.rst:156 +#: ../../source/how-to-run-simulations.rst:240 msgid "TensorFlow with GPUs" msgstr "GPU를 사용한 TensorFlow" -#: ../../source/how-to-run-simulations.rst:158 +#: ../../source/how-to-run-simulations.rst:242 msgid "" "When `using a GPU with TensorFlow " "`_ nearly your entire GPU memory of" @@ -7532,20 +7844,21 @@ msgstr "" "`_'를 통해 " "이 기본 동작을 비활성화할 수 있습니다." -#: ../../source/how-to-run-simulations.rst:160 +#: ../../source/how-to-run-simulations.rst:249 +#, fuzzy msgid "" "This would need to be done in the main process (which is where the server" " would run) and in each Actor created by the VCE. By means of " -":code:`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` " -"in order to specify a function to be executed upon actor initialization. " -"In this case, to enable GPU growth for TF workloads. It would look as " +"``actor_kwargs`` we can pass the reserved key `\"on_actor_init_fn\"` in " +"order to specify a function to be executed upon actor initialization. In " +"this case, to enable GPU growth for TF workloads. It would look as " "follows:" msgstr "" "이 작업은 메인 프로세스(서버가 실행되는 곳)와 VCE에서 생성한 각 액터에서 수행해야 합니다. " ":code:`actor_kwargs`를 통해 예약 키 `\"on_actor_init_fn\"`을 전달하여 액터 초기화 시 실행할 " "함수를 지정할 수 있습니다. 이 경우 TF 워크로드에 대한 GPU 증가를 활성화합니다. 다음과 같이 보입니다:" -#: ../../source/how-to-run-simulations.rst:179 +#: ../../source/how-to-run-simulations.rst:272 msgid "" "This is precisely the mechanism used in `Tensorflow/Keras Simulation " "`_ 예제에서 사용된 메커니즘입니다." 
-#: ../../source/how-to-run-simulations.rst:183 +#: ../../source/how-to-run-simulations.rst:276 msgid "Multi-node setups" msgstr "멀티 노드 설정" -#: ../../source/how-to-run-simulations.rst:185 +#: ../../source/how-to-run-simulations.rst:278 msgid "" "The VCE does not currently offer a way to control on which node a " "particular `virtual` client is executed. In other words, if more than a " @@ -7577,7 +7890,7 @@ msgstr "" "방식에 따라 모든 노드에 모든 데이터 세트 파티션의 복사본을 보유하거나 데이터 중복을 피하기 위해 데이터 세트 제공 메커니즘(예: " "nfs, 데이터베이스 사용)을 사용해야 할 수 있습니다." -#: ../../source/how-to-run-simulations.rst:187 +#: ../../source/how-to-run-simulations.rst:286 msgid "" "By definition virtual clients are `stateless` due to their ephemeral " "nature. A client state can be implemented as part of the Flower client " @@ -7610,17 +7923,17 @@ msgid "Model checkpointing" msgstr "모델 체크포인트" #: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +#, fuzzy msgid "" "Model updates can be persisted on the server-side by customizing " -":code:`Strategy` methods. Implementing custom strategies is always an " -"option, but for many cases it may be more convenient to simply customize " -"an existing strategy. The following code example defines a new " -":code:`SaveModelStrategy` which customized the existing built-in " -":code:`FedAvg` strategy. In particular, it customizes " -":code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class " -"(:code:`FedAvg`). It then continues to save returned (aggregated) weights" -" before it returns those aggregated weights to the caller (i.e., the " -"server):" +"``Strategy`` methods. Implementing custom strategies is always an option," +" but for many cases it may be more convenient to simply customize an " +"existing strategy. The following code example defines a new " +"``SaveModelStrategy`` which customized the existing built-in ``FedAvg`` " +"strategy. In particular, it customizes ``aggregate_fit`` by calling " +"``aggregate_fit`` in the base class (``FedAvg``). 
It then continues to " +"save returned (aggregated) weights before it returns those aggregated " +"weights to the caller (i.e., the server):" msgstr "" ":code:`Strategy` 메소드를 사용자 지정하여 서버 측에서 모델 업데이트를 지속할 수 있습니다. 사용자 지정 전략을 " "구현하는 것은 항상 옵션이지만 대부분의 경우 기존 전략을 간단히 사용자 지정하는 것이 더 편리할 수 있습니다. 다음 코드 예시는 " @@ -7629,11 +7942,11 @@ msgstr "" ":code:`aggregate_fit`을 사용자 지정합니다. 그런 다음 호출자(즉, 서버)에게 집계된 가중치를 반환하기 전에 " "반환된(집계된) 가중치를 계속 저장합니다:" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:47 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:53 msgid "Save and load PyTorch checkpoints" msgstr "파이토치 체크포인트 저장 및 로드" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:49 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:55 msgid "" "Similar to the previous example but with a few extra steps, we'll show " "how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " @@ -7647,7 +7960,7 @@ msgstr "" "``ndarray``의 목록으로 변환되어야 하며, ``OrderedDict`` 클래스 구조에 따라 파이토치 " "``state_dict``로 변환됩니다." -#: ../../source/how-to-save-and-load-model-checkpoints.rst:85 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:98 msgid "" "To load your progress, you simply append the following lines to your " "code. Note that this will iterate over all saved checkpoints and load the" @@ -7656,7 +7969,7 @@ msgstr "" "진행 상황을 로드하려면 코드에 다음 줄을 추가하기만 하면 됩니다. 이렇게 하면 저장된 모든 체크포인트를 반복하고 최신 체크포인트를 " "로드합니다:" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:97 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:111 msgid "" "Return/use this object of type ``Parameters`` wherever necessary, such as" " in the ``initial_parameters`` when defining a ``Strategy``." @@ -7677,22 +7990,22 @@ msgstr "" "Flower 0.19(및 다른 0.x 시리즈 릴리스)와 비교했을 때 기존 0.x 시리즈 프로젝트의 코드를 변경해야 하는 몇 가지 " "획기적인 변경 사항이 있습니다." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:8 -#: ../../source/how-to-upgrade-to-flower-next.rst:43 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-upgrade-to-flower-next.rst:63 msgid "Install update" msgstr "업데이트 설치" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 msgid "" "Here's how to update an existing installation to Flower 1.0 using either " "pip or Poetry:" msgstr "다음은 pip 또는 Poetry를 사용하여 기존 설치를 Flower 1.0으로 업데이트하는 방법입니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 msgid "pip: add ``-U`` when installing." msgstr "pip: 설치할 때 ``-U``를 추가합니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:16 msgid "" "``python -m pip install -U flwr`` (when using ``start_server`` and " "``start_client``)" @@ -7700,13 +8013,13 @@ msgstr "" "``python -m pip install -U flwr``(``start_server`` 및 ``start_client``를 " "사용하는 경우)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:15 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 msgid "" "``python -m pip install -U 'flwr[simulation]'`` (when using " "``start_simulation``)" msgstr "``python -m pip install -U 'flwr[simulation]'``(``start_simulation`` 사용 시)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 msgid "" "Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " "reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " @@ -7716,11 +8029,11 @@ msgstr "" "설치하세요(``poetry 설치``를 실행하기 전에 ``rm poetry.lock``을 통해 ``poetry.lock``을 삭제하는" " 것을 잊지 마세요)." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:23 msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" msgstr "``flwr = \"^1.0.0\"``(``start_server`` 및 ``start_client`` 사용 시)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:20 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 msgid "" "``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " "using ``start_simulation``)" @@ -7728,26 +8041,26 @@ msgstr "" "``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` " "(``start_simulation`` 사용 시)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 -#: ../../source/how-to-upgrade-to-flower-next.rst:100 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:28 +#: ../../source/how-to-upgrade-to-flower-next.rst:120 msgid "Required changes" msgstr "필수 변경 사항" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:30 msgid "The following breaking changes require manual updates." msgstr "다음과 같은 주요 변경 사항에는 수동 업데이트가 필요합니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:29 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 msgid "General" msgstr "일반" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:31 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:35 msgid "" "Pass all arguments as keyword arguments (not as positional arguments). " "Here's an example:" msgstr "모든 전달인자를 위치 전달인자가 아닌 키워드 전달인자로 전달합니다. 
다음은 예시입니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:38 msgid "" "Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " "FlowerClient())``" @@ -7755,7 +8068,7 @@ msgstr "" "Flower 0.19 (위치 전달인자): ``start_client(\"127.0.0.1:8080\", " "FlowerClient())``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:34 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 msgid "" "Flower 1.0 (keyword arguments): " "``start_client(server_address=\"127.0.0.1:8080\", " @@ -7764,12 +8077,12 @@ msgstr "" "Flower 1.0 (키워드 전달인자): ``start_client(server_address=\"127.0.0.1:8080\", " "client=FlowerClient())``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 #: ../../source/ref-api/flwr.client.Client.rst:2 msgid "Client" msgstr "클라이언트" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 msgid "" "Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " "``def get_parameters(self, config):``" @@ -7777,7 +8090,7 @@ msgstr "" "``NumPyClient``의 서브클래스: ``def get_parameters(self):``를 ``def " "get_parameters(self, config):``로 변경합니다" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:40 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 msgid "" "Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " "get_parameters(self, ins: GetParametersIns):``" @@ -7785,11 +8098,11 @@ msgstr "" "``클라이언트``의 서브클래스: ``def get_parameters(self):``를 ``def " "get_parameters(self, ins: GetParametersIns):``로 변경합니다" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 msgid "Strategies / ``start_server`` / ``start_simulation``" msgstr "전략 / ``start_server`` / ``start_simulation``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:53 msgid "" "Pass ``ServerConfig`` (instead of a dictionary) 
to ``start_server`` and " "``start_simulation``. Here's an example:" @@ -7797,7 +8110,7 @@ msgstr "" "Dictionary 대신 ``ServerConfig``를 ``start_server`` 및 ``start_simulation``에 " "전달합니다. 다음은 예제입니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:56 msgid "" "Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " "\"round_timeout\": 600.0}, ...)``" @@ -7805,7 +8118,7 @@ msgstr "" "Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " "\"round_timeout\": 600.0}, ...)``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 msgid "" "Flower 1.0: ``start_server(..., " "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " @@ -7815,7 +8128,7 @@ msgstr "" "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:50 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 msgid "" "Replace ``num_rounds=1`` in ``start_simulation`` with the new " "``config=ServerConfig(...)`` (see previous item)" @@ -7823,7 +8136,7 @@ msgstr "" "``start_simulation``의 ``num_rounds=1``을 새로운 ``config=ServerConfig(...)``로" " 바꿉니다(이전 항목 참조)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:63 msgid "" "Remove ``force_final_distributed_eval`` parameter from calls to " "``start_server``. Distributed evaluation on all clients can be enabled by" @@ -7834,19 +8147,19 @@ msgstr "" "클라이언트에 대한 분산 평가는 마지막 훈련 라운드 후 평가를 위해 모든 클라이언트를 샘플링하도록 전략을 구성하여 활성화할 수 " "있습니다." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:66 msgid "Rename parameter/ndarray conversion functions:" msgstr "매개변수/ndarray 변환 함수의 이름을 바꿉니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:55 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:69 msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" msgstr "``weights_to_parameters`` --> ``ndarrays_to_parameters``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:57 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 msgid "" "Strategy initialization: if the strategy relies on the default values for" " ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " @@ -7861,23 +8174,23 @@ msgstr "" "호출하여)는 이제 ``fraction_fit`` 및 ``fraction_evaluate``를 ``0.1``로 설정하여 FedAvg를" " 수동으로 초기화해야 합니다." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" msgstr "기본 제공 전략 매개변수의 이름을 바꿉니다(예: ``FedAvg``):" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:79 msgid "``fraction_eval`` --> ``fraction_evaluate``" msgstr "``fraction_eval`` --> ``fraction_evaluate``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 msgid "``min_eval_clients`` --> ``min_evaluate_clients``" msgstr "``min_eval_clients`` --> ``min_evaluate_clients``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:81 msgid "``eval_fn`` --> ``evaluate_fn``" msgstr "``eval_fn`` --> ``evaluate_fn``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:64 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:83 msgid "" "Rename ``rnd`` to ``server_round``. This impacts multiple methods and " "functions, for example, ``configure_fit``, ``aggregate_fit``, " @@ -7887,11 +8200,11 @@ msgstr "" " ``aggregate_fit``, ``configure_evaluate``, ``aggregate_evaluate`` 및 " "``evaluate_fn``)에 영향을 미칩니다." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:86 msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" msgstr "``server_round`` 및 ``config``를 ``evaluate_fn``에 추가합니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:88 msgid "" "Flower 0.19: ``def evaluate(parameters: NDArrays) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" @@ -7899,7 +8212,7 @@ msgstr "" "Flower 0.19: ``def evaluate(parameters: NDArrays) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:90 msgid "" "Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " "config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " @@ -7909,11 +8222,11 @@ msgstr "" "config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " "Scalar]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:94 msgid "Custom strategies" msgstr "사용자 정의 전략" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:73 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:96 msgid "" "The type of parameter ``failures`` has changed from " "``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " @@ -7926,13 +8239,13 @@ msgstr "" "BaseException]]``(``aggregate_fit``에서) 및 ``List[Union[Tuple[ClientProxy]," " EvaluateRes], BaseException]]``(``aggregate_evaluate``)로 변경되었습니다" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:74 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:100 msgid "" "The ``Strategy`` method ``evaluate`` now receives the current round of " "federated learning/evaluation as the first parameter:" msgstr "이제 ``Strategy`` 메서드 ``evaluate``는 현재 federated 학습/평가 라운드를 첫 번째 파라미터로 받습니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:76 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:103 msgid "" "Flower 0.19: ``def 
evaluate(self, parameters: Parameters) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" @@ -7940,7 +8253,7 @@ msgstr "" "Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:105 msgid "" "Flower 1.0: ``def evaluate(self, server_round: int, parameters: " "Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" @@ -7948,17 +8261,17 @@ msgstr "" "Flower 1.0: ``def evaluate(self, server_round: int, parameters: " "Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:109 msgid "Optional improvements" msgstr "선택적 개선 사항" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:82 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:111 msgid "" "Along with the necessary changes above, there are a number of potential " "improvements that just became possible:" msgstr "위의 필수 변경 사항과 함께 방금 가능한 여러 가지 잠재적 개선 사항이 있습니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:84 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:114 msgid "" "Remove \"placeholder\" methods from subclasses of ``Client`` or " "``NumPyClient``. If you, for example, use server-side evaluation, then " @@ -7968,7 +8281,7 @@ msgstr "" "``Client`` 또는 ``NumPyClient``의 서브 클래스에서 \"placeholder\" 메서드를 제거합니다. 예를 들어" " 서버 측 평가를 사용하는 경우 ``evaluate``의 빈 자리 표시자 구현은 더 이상 필요하지 않습니다." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:85 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:117 msgid "" "Configure the round timeout via ``start_simulation``: " "``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " @@ -7978,12 +8291,12 @@ msgstr "" "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 -#: ../../source/how-to-upgrade-to-flower-next.rst:317 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:121 +#: ../../source/how-to-upgrade-to-flower-next.rst:348 msgid "Further help" msgstr "추가 도움말" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:91 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:123 msgid "" "Most official `Flower code examples " "`_ are already updated" @@ -8012,7 +8325,7 @@ msgstr "" " 시작한 사용자든 상관없이 기존 설정을 원활하게 전환하여 버전 1.8부터 Flower Next의 최신 기능 및 개선 사항을 활용할 " "수 있도록 도와드립니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:9 +#: ../../source/how-to-upgrade-to-flower-next.rst:11 msgid "" "This guide shows how to reuse pre-``1.8`` Flower code with minimum code " "changes by using the *compatibility layer* in Flower Next. In another " @@ -8023,35 +8336,35 @@ msgstr "" " 재사용하는 방법을 보여줍니다. 다른 가이드에서는 순수한 Flower Next API로 Flower Next를 end-to-end로" " 실행하는 방법을 보여드리겠습니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:13 +#: ../../source/how-to-upgrade-to-flower-next.rst:15 msgid "Let's dive in!" msgstr "자세히 알아봅시다!" 
-#: ../../source/how-to-upgrade-to-flower-next.rst:48 +#: ../../source/how-to-upgrade-to-flower-next.rst:68 msgid "" "Here's how to update an existing installation of Flower to Flower Next " "with ``pip``:" msgstr "기존에 설치된 Flower to Flower Next를 ``pip``으로 업데이트하는 방법은 다음과 같습니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:54 +#: ../../source/how-to-upgrade-to-flower-next.rst:74 msgid "or if you need Flower Next with simulation:" msgstr "또는 시뮬레이션이 포함된 Flower Next가 필요한 경우:" -#: ../../source/how-to-upgrade-to-flower-next.rst:61 +#: ../../source/how-to-upgrade-to-flower-next.rst:80 msgid "" "Ensure you set the following version constraint in your " "``requirements.txt``" msgstr "``requirements.txt``에서 다음 버전 제약 조건을 설정했는지 확인하세요" -#: ../../source/how-to-upgrade-to-flower-next.rst:71 +#: ../../source/how-to-upgrade-to-flower-next.rst:90 msgid "or ``pyproject.toml``:" msgstr "또는 ``pyproject.toml``:" -#: ../../source/how-to-upgrade-to-flower-next.rst:82 +#: ../../source/how-to-upgrade-to-flower-next.rst:101 msgid "Using Poetry" msgstr "Poetry 사용" -#: ../../source/how-to-upgrade-to-flower-next.rst:84 +#: ../../source/how-to-upgrade-to-flower-next.rst:103 msgid "" "Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " "(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " @@ -8060,13 +8373,13 @@ msgstr "" "``pyproject.toml``에서 ``flwr`` 의존성를 업데이트한 다음 다시 설치하세요(``poetry install``을 " "실행하기 전에 ``rm poetry.lock``을 통해 ``poetry.lock``을 삭제하는 것을 잊지 마세요)." -#: ../../source/how-to-upgrade-to-flower-next.rst:86 +#: ../../source/how-to-upgrade-to-flower-next.rst:106 msgid "" "Ensure you set the following version constraint in your " "``pyproject.toml``:" msgstr "``pyproject.toml``에 다음 버전 제약 조건을 설정했는지 확인하세요:" -#: ../../source/how-to-upgrade-to-flower-next.rst:102 +#: ../../source/how-to-upgrade-to-flower-next.rst:122 msgid "" "In Flower Next, the *infrastructure* and *application layers* have been " "decoupled. 
Instead of starting a client in code via ``start_client()``, " @@ -8085,11 +8398,11 @@ msgstr "" "업데이트가 필요하지 않고 기존 방식과 Flower Next 방식 모두에서 프로젝트를 실행할 수 있는 non-breaking 변경 " "사항은 다음과 같습니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:109 +#: ../../source/how-to-upgrade-to-flower-next.rst:131 msgid "|clientapp_link|_" msgstr "|clientapp_link|_" -#: ../../source/how-to-upgrade-to-flower-next.rst:110 +#: ../../source/how-to-upgrade-to-flower-next.rst:133 msgid "" "Wrap your existing client with |clientapp_link|_ instead of launching it " "via |startclient_link|_. Here's an example:" @@ -8097,11 +8410,11 @@ msgstr "" "|clientapp_link|_를 통해 실행하는 대신 기존 클라이언트를 |clientapp_link|_로 래핑하세요. 다음은 " "예시입니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:132 +#: ../../source/how-to-upgrade-to-flower-next.rst:156 msgid "|serverapp_link|_" msgstr "|serverapp_link|_" -#: ../../source/how-to-upgrade-to-flower-next.rst:133 +#: ../../source/how-to-upgrade-to-flower-next.rst:158 msgid "" "Wrap your existing strategy with |serverapp_link|_ instead of starting " "the server via |startserver_link|_. Here's an example:" @@ -8109,11 +8422,11 @@ msgstr "" "서버를 시작하려면 |startserver_link|_를 통해 서버를 시작하는 대신 기존 전략을 |serverapp_link|_로 " "래핑하세요. 다음은 예시입니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:154 +#: ../../source/how-to-upgrade-to-flower-next.rst:179 msgid "Deployment" msgstr "배포" -#: ../../source/how-to-upgrade-to-flower-next.rst:155 +#: ../../source/how-to-upgrade-to-flower-next.rst:181 msgid "" "Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " "in sequence, |flowernext_clientapp_link|_ (2x) and " @@ -8124,13 +8437,13 @@ msgstr "" "|flowernext_clientapp_link|_(2회) 및 |flowernext_serverapp_link|_를 순서대로 " "실행합니다. 'client.py'와 'server.py'를 Python 스크립트로 실행할 필요는 없습니다." 
-#: ../../source/how-to-upgrade-to-flower-next.rst:158 +#: ../../source/how-to-upgrade-to-flower-next.rst:184 msgid "" "Here's an example to start the server without HTTPS (only for " "prototyping):" msgstr "다음은 HTTPS 없이 서버를 시작하는 예제입니다(프로토타이핑용으로만 사용):" -#: ../../source/how-to-upgrade-to-flower-next.rst:174 +#: ../../source/how-to-upgrade-to-flower-next.rst:200 msgid "" "Here's another example to start with HTTPS. Use the ``--ssl-ca-" "certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " @@ -8140,11 +8453,11 @@ msgstr "" "다음은 HTTPS로 시작하는 또 다른 예제입니다. ``--ssl-ca-certfile``, ``--ssl-certfile``, " "``--ssl-keyfile`` 명령줄 옵션을 사용하여 (CA 인증서, 서버 인증서 및 서버 개인 키)의 경로를 전달합니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:201 +#: ../../source/how-to-upgrade-to-flower-next.rst:228 msgid "Simulation in CLI" msgstr "CLI 시뮬레이션" -#: ../../source/how-to-upgrade-to-flower-next.rst:202 +#: ../../source/how-to-upgrade-to-flower-next.rst:230 msgid "" "Wrap your existing client and strategy with |clientapp_link|_ and " "|serverapp_link|_, respectively. There is no need to use |startsim_link|_" @@ -8153,7 +8466,7 @@ msgstr "" "기존 클라이언트와 전략을 각각 |clientapp_link|_와 |serverapp_link|_로 래핑하세요. 더 이상 " "|startsim_link|_를 사용할 필요가 없습니다. 다음은 예시입니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:232 +#: ../../source/how-to-upgrade-to-flower-next.rst:263 msgid "" "Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " "``client_app`` object in the code instead of executing the Python script." @@ -8164,7 +8477,7 @@ msgstr "" "Python 스크립트를 실행하는 대신 CLI에서 |flower_simulation_link|_를 실행하고 코드에서 " "``server_app`` / ``client_app`` 개체를 가리키세요. 
다음은 예제입니다(``server_app`` 및 " "``client_app`` 객체가 ``sim.py`` 모듈에 있다고 가정):" -#: ../../source/how-to-upgrade-to-flower-next.rst:249 +#: ../../source/how-to-upgrade-to-flower-next.rst:280 msgid "" "Set default resources for each |clientapp_link|_ using the ``--backend-" "config`` command line argument instead of setting the " @@ -8173,17 +8486,17 @@ msgstr "" "|startsim_link|_에서 ``client_resources`` 인수를 설정하는 대신 ``--backend-config`` " "명령줄 인수를 사용하여 각 |clientapp_link|_에 대한 기본 리소스를 설정하세요. 다음은 예시입니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:275 +#: ../../source/how-to-upgrade-to-flower-next.rst:304 msgid "Simulation in a Notebook" msgstr "Notebook에서 시뮬레이션" -#: ../../source/how-to-upgrade-to-flower-next.rst:276 +#: ../../source/how-to-upgrade-to-flower-next.rst:306 msgid "" "Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " "an example:" msgstr "notebook에서 |startsim_link|_ 대신 |runsim_link|_를 실행하세요. 다음은 예시입니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:319 +#: ../../source/how-to-upgrade-to-flower-next.rst:350 msgid "" "Some official `Flower code examples `_ " "are already updated to Flower Next so they can serve as a reference for " @@ -8199,11 +8512,11 @@ msgstr "" "``Flower Discuss `_에 참여하여 질문에 대한 답변을 확인하거나 다른" " 사람들과 Flower Next로의 이동에 대해 공유하고 배울 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:325 +#: ../../source/how-to-upgrade-to-flower-next.rst:357 msgid "Important" msgstr "중요" -#: ../../source/how-to-upgrade-to-flower-next.rst:328 +#: ../../source/how-to-upgrade-to-flower-next.rst:359 msgid "" "As we continuously enhance Flower Next at a rapid pace, we'll be " "periodically updating this guide. Please feel free to share any feedback " @@ -8212,7 +8525,7 @@ msgstr "" "Flower Next는 빠른 속도로 지속적으로 개선되고 있으므로 이 가이드는 주기적으로 업데이트될 예정입니다. 피드백이 있으면 " "언제든지 공유해 주세요!" -#: ../../source/how-to-upgrade-to-flower-next.rst:334 +#: ../../source/how-to-upgrade-to-flower-next.rst:365 msgid "Happy migrating! 🚀" msgstr "행복한 마이그레이션! 
🚀" @@ -8226,7 +8539,7 @@ msgid "" " interfaces may change in future versions.**" msgstr "**참고: 이 튜토리얼은 실험적인 기능을 다룹니다. 기능 및 인터페이스는 향후 버전에서 변경될 수 있습니다.**" -#: ../../source/how-to-use-built-in-mods.rst:6 +#: ../../source/how-to-use-built-in-mods.rst:7 msgid "" "In this tutorial, we will learn how to utilize built-in mods to augment " "the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " @@ -8236,11 +8549,11 @@ msgstr "" "이 튜토리얼에서는 내장 모드를 활용하여 ``ClientApp``의 동작을 보강하는 방법을 배우겠습니다. " "Mods(Modifiers라고도 함)를 사용하면 ``ClientApp``에서 작업이 처리되기 전과 후에 작업을 수행할 수 있습니다." -#: ../../source/how-to-use-built-in-mods.rst:9 +#: ../../source/how-to-use-built-in-mods.rst:12 msgid "What are Mods?" msgstr "Mods란 무엇인가요?" -#: ../../source/how-to-use-built-in-mods.rst:11 +#: ../../source/how-to-use-built-in-mods.rst:14 msgid "" "A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " "or inspect the incoming ``Message`` and the resulting outgoing " @@ -8249,41 +8562,41 @@ msgstr "" "Mod는 ``ClientApp``을 감싸는 콜러블입니다. 들어오는 ``Message``와 그 결과로 나가는 ``Message``를 " "조작하거나 검사할 수 있습니다. ``Mod``의 시그니처는 다음과 같습니다:" -#: ../../source/how-to-use-built-in-mods.rst:18 +#: ../../source/how-to-use-built-in-mods.rst:23 msgid "A typical mod function might look something like this:" msgstr "일반적인 mod 함수는 다음과 같은 모습일 수 있습니다:" -#: ../../source/how-to-use-built-in-mods.rst:31 +#: ../../source/how-to-use-built-in-mods.rst:36 msgid "Using Mods" msgstr "Mods 사용" -#: ../../source/how-to-use-built-in-mods.rst:33 +#: ../../source/how-to-use-built-in-mods.rst:38 msgid "To use mods in your ``ClientApp``, you can follow these steps:" msgstr "``ClientApp``에서 mods를 사용하려면 다음 단계를 따르세요:" -#: ../../source/how-to-use-built-in-mods.rst:36 +#: ../../source/how-to-use-built-in-mods.rst:41 msgid "1. Import the required mods" msgstr "1. 
필요한 mods를 가져옵니다" -#: ../../source/how-to-use-built-in-mods.rst:38 +#: ../../source/how-to-use-built-in-mods.rst:43 msgid "First, import the built-in mod you intend to use:" msgstr "먼저 사용하려는 기본 제공 mod를 가져옵니다:" -#: ../../source/how-to-use-built-in-mods.rst:46 +#: ../../source/how-to-use-built-in-mods.rst:51 msgid "2. Define your client function" msgstr "2. 클라이언트 기능 정의" -#: ../../source/how-to-use-built-in-mods.rst:48 +#: ../../source/how-to-use-built-in-mods.rst:53 msgid "" "Define your client function (``client_fn``) that will be wrapped by the " "mod(s):" msgstr "mod(s)로 래핑할 클라이언트 함수('``client_fn``)를 정의합니다:" -#: ../../source/how-to-use-built-in-mods.rst:57 +#: ../../source/how-to-use-built-in-mods.rst:62 msgid "3. Create the ``ClientApp`` with mods" msgstr "3. mods로 ``ClientApp``을 생성합니다" -#: ../../source/how-to-use-built-in-mods.rst:59 +#: ../../source/how-to-use-built-in-mods.rst:64 msgid "" "Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " "argument. The order in which you provide the mods matters:" @@ -8291,39 +8604,39 @@ msgstr "" "``ClientApp``을 생성하고 mods를 ``mods`` argument에 목록으로 전달합니다. 
mods를 제공하는 순서가 " "중요합니다:" -#: ../../source/how-to-use-built-in-mods.rst:72 +#: ../../source/how-to-use-built-in-mods.rst:78 msgid "Order of execution" msgstr "실행 순서" -#: ../../source/how-to-use-built-in-mods.rst:74 +#: ../../source/how-to-use-built-in-mods.rst:80 msgid "" "When the ``ClientApp`` runs, the mods are executed in the order they are " "provided in the list:" msgstr "``ClientApp``이 실행되면 목록에 제공된 순서대로 모드가 실행됩니다:" -#: ../../source/how-to-use-built-in-mods.rst:76 +#: ../../source/how-to-use-built-in-mods.rst:83 msgid "``example_mod_1`` (outermost mod)" msgstr "``example_mod_1``(가장 바깥쪽 mod)" -#: ../../source/how-to-use-built-in-mods.rst:77 +#: ../../source/how-to-use-built-in-mods.rst:84 msgid "``example_mod_2`` (next mod)" msgstr "``example_mod_2`` (다음 mod)" -#: ../../source/how-to-use-built-in-mods.rst:78 +#: ../../source/how-to-use-built-in-mods.rst:85 msgid "" "Message handler (core function that handles the incoming ``Message`` and " "returns the outgoing ``Message``)" msgstr "Message handler(들어오는 ``Message``를 처리하고 나가는 ``Message``를 반환하는 핵심 함수)" -#: ../../source/how-to-use-built-in-mods.rst:79 +#: ../../source/how-to-use-built-in-mods.rst:87 msgid "``example_mod_2`` (on the way back)" msgstr "``example_mod_2``(돌아가는 방법)" -#: ../../source/how-to-use-built-in-mods.rst:80 +#: ../../source/how-to-use-built-in-mods.rst:88 msgid "``example_mod_1`` (outermost mod on the way back)" msgstr "``example_mod_1``(돌아가는 방법에 가장 바깥쪽 모드)" -#: ../../source/how-to-use-built-in-mods.rst:82 +#: ../../source/how-to-use-built-in-mods.rst:90 msgid "" "Each mod has a chance to inspect and modify the incoming ``Message`` " "before passing it to the next mod, and likewise with the outgoing " @@ -8332,7 +8645,7 @@ msgstr "" "각 mod는 다음 mod로 전달하기 전에 들어오는 ``Message``를 검사하고 수정할 기회가 있으며, 스택 위로 반환하기 전에 " "나가는 ``Message``도 마찬가지로 검사하고 수정할 수 있습니다." 
-#: ../../source/how-to-use-built-in-mods.rst:87 +#: ../../source/how-to-use-built-in-mods.rst:97 msgid "" "By following this guide, you have learned how to effectively use mods to " "enhance your ``ClientApp``'s functionality. Remember that the order of " @@ -8341,7 +8654,7 @@ msgstr "" "이 가이드를 따라 mods를 효과적으로 사용하여 ``ClientApp``의 기능을 향상시키는 방법을 배웠습니다. mods 순서는 " "매우 중요하며 입력과 출력이 처리되는 방식에 영향을 미친다는 점을 기억하세요." -#: ../../source/how-to-use-built-in-mods.rst:89 +#: ../../source/how-to-use-built-in-mods.rst:101 msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" msgstr "Mods를 통해 더욱 강력하고 유연한 ``ClientApp``을 구축해 보세요!" @@ -8349,7 +8662,7 @@ msgstr "Mods를 통해 더욱 강력하고 유연한 ``ClientApp``을 구축해 msgid "Use Differential Privacy" msgstr "차분 개인정보 보호 사용" -#: ../../source/how-to-use-differential-privacy.rst:3 +#: ../../source/how-to-use-differential-privacy.rst:4 msgid "" "This guide explains how you can utilize differential privacy in the " "Flower framework. If you are not yet familiar with differential privacy, " @@ -8358,7 +8671,7 @@ msgstr "" "이 가이드에서는 Flower 프레임워크에서 차분 개인정보 보호 기능을 활용하는 방법을 설명합니다. 차분 개인정보 보호에 대해 아직 " "익숙하지 않은 경우 :doc:`explanation-differential-privacy`를 참조하세요." -#: ../../source/how-to-use-differential-privacy.rst:7 +#: ../../source/how-to-use-differential-privacy.rst:10 msgid "" "Differential Privacy in Flower is in a preview phase. If you plan to use " "these features in a production environment with sensitive data, feel free" @@ -8368,7 +8681,7 @@ msgstr "" "Flower의 차분 개인정보 보호는 현재 프리뷰 단계에 있습니다. 민감한 데이터가 있는 프로덕션 환경에서 이러한 기능을 사용할 " "계획이라면 언제든지 문의하여 요구 사항을 논의하고 이러한 기능을 가장 잘 사용하는 방법에 대한 안내를 받으세요." -#: ../../source/how-to-use-differential-privacy.rst:12 +#: ../../source/how-to-use-differential-privacy.rst:17 #, fuzzy msgid "" "This approach consists of two separate phases: clipping of the updates " @@ -8379,7 +8692,7 @@ msgstr "" "이 접근 방식은 업데이트 클리핑과 집계된 모델에 노이즈 추가라는 두 가지 단계로 구성됩니다. 
클리핑 단계의 경우, Flower " "프레임워크는 클리핑을 서버 측에서 수행할지 클라이언트 측에서 수행할지 결정할 수 있도록 했습니다." -#: ../../source/how-to-use-differential-privacy.rst:15 +#: ../../source/how-to-use-differential-privacy.rst:21 msgid "" "**Server-side Clipping**: This approach has the advantage of the server " "enforcing uniform clipping across all clients' updates and reducing the " @@ -8391,7 +8704,7 @@ msgstr "" "값에 대한 통신 오버헤드를 줄일 수 있다는 장점이 있습니다. 하지만 모든 클라이언트에 대해 클리핑 작업을 수행해야 하기 때문에 " "서버의 계산 부하가 증가한다는 단점도 있습니다." -#: ../../source/how-to-use-differential-privacy.rst:16 +#: ../../source/how-to-use-differential-privacy.rst:26 msgid "" "**Client-side Clipping**: This approach has the advantage of reducing the" " computational overhead on the server. However, it also has the " @@ -8401,18 +8714,19 @@ msgstr "" "**Client-side Clipping**: 이 방식은 서버의 계산 오버헤드를 줄일 수 있다는 장점이 있습니다. 하지만 서버가 " "클리핑 프로세스에 대한 통제력이 떨어지기 때문에 centralized 제어가 부족하다는 단점도 있습니다." -#: ../../source/how-to-use-differential-privacy.rst:21 +#: ../../source/how-to-use-differential-privacy.rst:31 msgid "Server-side Clipping" msgstr "서버 측 클리핑" -#: ../../source/how-to-use-differential-privacy.rst:22 +#: ../../source/how-to-use-differential-privacy.rst:33 +#, fuzzy msgid "" -"For central DP with server-side clipping, there are two :code:`Strategy` " -"classes that act as wrappers around the actual :code:`Strategy` instance " -"(for example, :code:`FedAvg`). The two wrapper classes are " -":code:`DifferentialPrivacyServerSideFixedClipping` and " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " -"adaptive clipping." +"For central DP with server-side clipping, there are two ``Strategy`` " +"classes that act as wrappers around the actual ``Strategy`` instance (for" +" example, ``FedAvg``). The two wrapper classes are " +"``DifferentialPrivacyServerSideFixedClipping`` and " +"``DifferentialPrivacyServerSideAdaptiveClipping`` for fixed and adaptive " +"clipping." 
msgstr "" "서버 측 클리핑이 있는 중앙 DP의 경우, 실제 :code:`Strategy` 인스턴스를 감싸는 래퍼 역할을 하는 두 개의 " ":code:`Strategy` 클래스가 있습니다(예: :code:`FedAvg`). 두 개의 래퍼 클래스는 고정 및 적응형 클리핑을" @@ -8423,13 +8737,13 @@ msgstr "" msgid "server side clipping" msgstr "서버 측 클리핑" -#: ../../source/how-to-use-differential-privacy.rst:31 +#: ../../source/how-to-use-differential-privacy.rst:43 +#, fuzzy msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use server-" -"side fixed clipping using the " -":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " -"same approach can be used with " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " +"The code sample below enables the ``FedAvg`` strategy to use server-side " +"fixed clipping using the ``DifferentialPrivacyServerSideFixedClipping`` " +"wrapper class. The same approach can be used with " +"``DifferentialPrivacyServerSideAdaptiveClipping`` by adjusting the " "corresponding input parameters." msgstr "" "아래 코드 샘플은 :code:`FedAvg` 전략이 " @@ -8438,19 +8752,20 @@ msgstr "" ":code:`DifferentialPrivacyServerSideAdaptiveClipping`과 동일한 접근 방식을 사용할 수 " "있습니다." -#: ../../source/how-to-use-differential-privacy.rst:52 +#: ../../source/how-to-use-differential-privacy.rst:64 msgid "Client-side Clipping" msgstr "클라이언트 측 클리핑" -#: ../../source/how-to-use-differential-privacy.rst:53 +#: ../../source/how-to-use-differential-privacy.rst:66 +#, fuzzy msgid "" "For central DP with client-side clipping, the server sends the clipping " "value to selected clients on each round. Clients can use existing Flower " -":code:`Mods` to perform the clipping. Two mods are available for fixed " -"and adaptive client-side clipping: :code:`fixedclipping_mod` and " -":code:`adaptiveclipping_mod` with corresponding server-side wrappers " -":code:`DifferentialPrivacyClientSideFixedClipping` and " -":code:`DifferentialPrivacyClientSideAdaptiveClipping`." +"``Mods`` to perform the clipping. 
Two mods are available for fixed and " +"adaptive client-side clipping: ``fixedclipping_mod`` and " +"``adaptiveclipping_mod`` with corresponding server-side wrappers " +"``DifferentialPrivacyClientSideFixedClipping`` and " +"``DifferentialPrivacyClientSideAdaptiveClipping``." msgstr "" "클라이언트 측 클리핑이 있는 중앙 DP의 경우 서버는 각 라운드마다 선택한 클라이언트에 클리핑 값을 보냅니다. 클라이언트는 기존 " "Flower :code:`Mods`를 사용하여 클리핑을 수행할 수 있습니다. 고정 및 적응형 클라이언트 측 클리핑에는 두 가지 " @@ -8462,27 +8777,29 @@ msgstr "" msgid "client side clipping" msgstr "클라이언트 측 클리핑" -#: ../../source/how-to-use-differential-privacy.rst:63 +#: ../../source/how-to-use-differential-privacy.rst:78 +#, fuzzy msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use " -"differential privacy with client-side fixed clipping using both the " -":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " -"the client, :code:`fixedclipping_mod`:" +"The code sample below enables the ``FedAvg`` strategy to use differential" +" privacy with client-side fixed clipping using both the " +"``DifferentialPrivacyClientSideFixedClipping`` wrapper class and, on the " +"client, ``fixedclipping_mod``:" msgstr "" "아래 코드 샘플은 :code:`FedAvg` 전략이 클라이언트 측 고정 클리핑과 함께 차분 프라이버시를 사용할 수 있도록 " ":code:`DifferentialPrivacyClientSideFixedClipping` 래퍼 클래스와 클라이언트에서 " ":code:`fixedclipping_mod`를 모두 사용하도록 합니다:" -#: ../../source/how-to-use-differential-privacy.rst:80 +#: ../../source/how-to-use-differential-privacy.rst:97 +#, fuzzy msgid "" -"In addition to the server-side strategy wrapper, the :code:`ClientApp` " -"needs to configure the matching :code:`fixedclipping_mod` to perform the " -"client-side clipping:" +"In addition to the server-side strategy wrapper, the ``ClientApp`` needs " +"to configure the matching ``fixedclipping_mod`` to perform the client-" +"side clipping:" msgstr "" "서버 측 전략 래퍼 외에도 클라이언트 측 클리핑을 수행하려면 :code:`ClientApp`이 일치하는 " ":code:`fixedclipping_mod`를 구성해야 합니다:" -#: ../../source/how-to-use-differential-privacy.rst:97 
+#: ../../source/how-to-use-differential-privacy.rst:115 msgid "" "To utilize local differential privacy (DP) and add noise to the client " "model parameters before transmitting them to the server in Flower, you " @@ -8496,11 +8813,12 @@ msgstr "" msgid "local DP mod" msgstr "로컬 DP mod" -#: ../../source/how-to-use-differential-privacy.rst:104 -msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +#: ../../source/how-to-use-differential-privacy.rst:125 +#, fuzzy +msgid "Below is a code example that shows how to use ``LocalDpMod``:" msgstr "다음은 :code:`LocalDpMod`를 사용하는 방법을 보여주는 코드 예시입니다:" -#: ../../source/how-to-use-differential-privacy.rst:122 +#: ../../source/how-to-use-differential-privacy.rst:140 msgid "" "Please note that the order of mods, especially those that modify " "parameters, is important when using multiple modifiers. Typically, " @@ -8510,11 +8828,11 @@ msgstr "" "여러 개의 수정자를 사용할 때는 수정자, 특히 매개변수를 수정하는 수정자의 순서가 중요하다는 점에 유의하세요. 일반적으로 차분 " "프라이버시(DP) 수정자는 매개변수에서 가장 마지막에 작동해야 합니다." -#: ../../source/how-to-use-differential-privacy.rst:125 +#: ../../source/how-to-use-differential-privacy.rst:145 msgid "Local Training using Privacy Engines" msgstr "Privacy Engines을 사용한 로컬 훈련" -#: ../../source/how-to-use-differential-privacy.rst:126 +#: ../../source/how-to-use-differential-privacy.rst:147 msgid "" "For ensuring data instance-level privacy during local model training on " "the client side, consider leveraging privacy engines such as Opacus and " @@ -8536,54 +8854,57 @@ msgid "Use strategies" msgstr "전략 사용하기" #: ../../source/how-to-use-strategies.rst:4 +#, fuzzy msgid "" "Flower allows full customization of the learning process through the " -":code:`Strategy` abstraction. A number of built-in strategies are " -"provided in the core framework." +"``Strategy`` abstraction. A number of built-in strategies are provided in" +" the core framework." msgstr "" "Flower는 :code:`Strategy` abstraction를 통해 학습 과정을 완전히 사용자 정의할 수 있습니다. 
핵심 " "프레임워크에는 여러 가지 기본 제공 전략이 제공됩니다." -#: ../../source/how-to-use-strategies.rst:6 +#: ../../source/how-to-use-strategies.rst:7 msgid "" "There are three ways to customize the way Flower orchestrates the " "learning process on the server side:" msgstr "서버 측에서 Flower가 학습 과정을 조율하는 방식을 사용자 지정하는 방법에는 세 가지가 있습니다:" -#: ../../source/how-to-use-strategies.rst:8 -msgid "Use an existing strategy, for example, :code:`FedAvg`" +#: ../../source/how-to-use-strategies.rst:10 +#, fuzzy +msgid "Use an existing strategy, for example, ``FedAvg``" msgstr "기존 전략(예: :code:`FedAvg`)을 사용합니다" -#: ../../source/how-to-use-strategies.rst:9 -#: ../../source/how-to-use-strategies.rst:40 +#: ../../source/how-to-use-strategies.rst:11 +#: ../../source/how-to-use-strategies.rst:43 msgid "Customize an existing strategy with callback functions" msgstr "콜백 함수로 기존 전략 사용자 지정" -#: ../../source/how-to-use-strategies.rst:10 -#: ../../source/how-to-use-strategies.rst:87 +#: ../../source/how-to-use-strategies.rst:12 +#: ../../source/how-to-use-strategies.rst:99 msgid "Implement a novel strategy" msgstr "새로운 전략 구현" -#: ../../source/how-to-use-strategies.rst:14 +#: ../../source/how-to-use-strategies.rst:15 msgid "Use an existing strategy" msgstr "기존 전략 사용" -#: ../../source/how-to-use-strategies.rst:16 +#: ../../source/how-to-use-strategies.rst:17 msgid "" "Flower comes with a number of popular federated learning strategies " "built-in. A built-in strategy can be instantiated as follows:" msgstr "Flower에는 여러 가지 인기 있는 연합 학습 전략이 기본으로 제공됩니다. 기본 제공 전략은 다음과 같이 인스턴스화할 수 있습니다:" -#: ../../source/how-to-use-strategies.rst:25 +#: ../../source/how-to-use-strategies.rst:27 +#, fuzzy msgid "" "This creates a strategy with all parameters left at their default values " -"and passes it to the :code:`start_server` function. It is usually " -"recommended to adjust a few parameters during instantiation:" +"and passes it to the ``start_server`` function. 
It is usually recommended" +" to adjust a few parameters during instantiation:" msgstr "" "이렇게 하면 모든 매개변수가 기본값으로 유지된 전략이 생성되어 :code:`start_server` 함수에 전달됩니다. 일반적으로 " "인스턴스화 중에 몇 가지 매개변수를 조정하는 것이 좋습니다:" -#: ../../source/how-to-use-strategies.rst:42 +#: ../../source/how-to-use-strategies.rst:45 msgid "" "Existing strategies provide several ways to customize their behaviour. " "Callback functions allow strategies to call user-provided code during " @@ -8592,58 +8913,59 @@ msgstr "" "기존 전략은 동작을 사용자 지정하는 여러 가지 방법을 제공합니다. 콜백 함수를 사용하면 전략이 실행 중에 사용자가 제공한 코드를 " "호출할 수 있습니다." -#: ../../source/how-to-use-strategies.rst:45 +#: ../../source/how-to-use-strategies.rst:49 msgid "Configuring client fit and client evaluate" msgstr "클라이언트 적합성 및 클라이언트 평가 구성" -#: ../../source/how-to-use-strategies.rst:47 +#: ../../source/how-to-use-strategies.rst:51 +#, fuzzy msgid "" "The server can pass new configuration values to the client each round by " -"providing a function to :code:`on_fit_config_fn`. The provided function " -"will be called by the strategy and must return a dictionary of " -"configuration key values pairs that will be sent to the client. It must " -"return a dictionary of arbitrary configuration values :code:`client.fit`" -" and :code:`client.evaluate` functions during each round of federated " -"learning." +"providing a function to ``on_fit_config_fn``. The provided function will " +"be called by the strategy and must return a dictionary of configuration " +"key values pairs that will be sent to the client. It must return a " +"dictionary of arbitrary configuration values ``client.fit`` and " +"``client.evaluate`` functions during each round of federated learning." msgstr "" "서버는 매 라운드마다 새로운 설정 값을 클라이언트에 전달하기 위해 :code:`on_fit_config_fn`에 함수를 제공할 수 " "있습니다. 제공된 함수는 전략에 의해 호출되며 클라이언트에 전송될 구성 키 값 쌍의 dictionary를 반환해야 합니다. 연합 " "학습의 각 라운드 동안 임의의 구성 값 dictionary인 :code:`client.fit` 및 " ":code:`client.evaluate` 함수를 반환해야 합니다." 
-#: ../../source/how-to-use-strategies.rst:75 +#: ../../source/how-to-use-strategies.rst:84 #, fuzzy msgid "" -"The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " +"The ``on_fit_config_fn`` can be used to pass arbitrary configuration " "values from server to client, and potentially change these values each " "round, for example, to adjust the learning rate. The client will receive " -"the dictionary returned by the :code:`on_fit_config_fn` in its own " -":code:`client.fit()` function." +"the dictionary returned by the ``on_fit_config_fn`` in its own " +"``client.fit()`` function." msgstr "" ":code:`on_fit_config_fn`은 서버에서 클라이언트로 임의의 구성 값을 전달하고, 예를 들어 학습 속도를 조정하기 " "위해 매 라운드마다 이 값을 잠재적으로 변경하는 데 사용할 수 있습니다. 클라이언트는 자체 :code:`client.fit()` " "함수에서 :code:`on_fit_config_fn`이 반환한 dictionary를 받습니다." -#: ../../source/how-to-use-strategies.rst:78 +#: ../../source/how-to-use-strategies.rst:89 +#, fuzzy msgid "" -"Similar to :code:`on_fit_config_fn`, there is also " -":code:`on_evaluate_config_fn` to customize the configuration sent to " -":code:`client.evaluate()`" +"Similar to ``on_fit_config_fn``, there is also ``on_evaluate_config_fn`` " +"to customize the configuration sent to ``client.evaluate()``" msgstr "" ":code:`on_fit_config_fn`과 유사하게, :code:`client.evaluate()`로 전송되는 구성을 사용자 " "지정하는 :code:`on_evaluate_config_fn`도 있습니다" -#: ../../source/how-to-use-strategies.rst:81 +#: ../../source/how-to-use-strategies.rst:93 msgid "Configuring server-side evaluation" msgstr "서버 측 평가 구성" -#: ../../source/how-to-use-strategies.rst:83 +#: ../../source/how-to-use-strategies.rst:95 +#, fuzzy msgid "" "Server-side evaluation can be enabled by passing an evaluation function " -"to :code:`evaluate_fn`." +"to ``evaluate_fn``." msgstr "서버 측 평가는 :code:`evaluate_fn`에 평가 함수를 전달하여 활성화할 수 있습니다." 
-#: ../../source/how-to-use-strategies.rst:89 +#: ../../source/how-to-use-strategies.rst:101 msgid "" "Writing a fully custom strategy is a bit more involved, but it provides " "the most flexibility. Read the `Implementing Strategies ` | :doc:`iOS `" -#: ../../source/index.rst:64 +#: ../../source/index.rst:70 msgid "We also made video tutorials for PyTorch:" msgstr "파이토치용 동영상 튜토리얼도 만들었습니다:" -#: ../../source/index.rst:69 +#: ../../source/index.rst:75 msgid "And TensorFlow:" msgstr "그리고 TensorFlow도:" -#: ../../source/index.rst:77 +#: ../../source/index.rst:83 msgid "" "Problem-oriented how-to guides show step-by-step how to achieve a " "specific goal." msgstr "문제 중심의 방법 가이드는 특정 목표를 달성하는 방법을 단계별로 보여줍니다." -#: ../../source/index.rst:110 +#: ../../source/index.rst:116 msgid "" "Understanding-oriented concept guides explain and discuss key topics and " "underlying ideas behind Flower and collaborative AI." msgstr "이해 중심의 개념 가이드에서는 Flower와 협업 AI의 주요 주제와 기본 아이디어를 설명하고 토론합니다." -#: ../../source/index.rst:121 +#: ../../source/index.rst:128 msgid "References" msgstr "참조" -#: ../../source/index.rst:123 +#: ../../source/index.rst:130 msgid "Information-oriented API reference and other reference material." msgstr "정보 지향 API 참조 및 기타 참고 자료." -#: ../../source/index.rst:132::1 +#: ../../source/index.rst:139::1 msgid ":py:obj:`flwr `\\" msgstr ":py:obj:`flwr `\\" -#: ../../source/index.rst:132::1 flwr:1 of +#: ../../source/index.rst:139::1 flwr:1 of msgid "Flower main package." msgstr "Flower 메인 패키지." -#: ../../source/index.rst:149 +#: ../../source/index.rst:155 msgid "Contributor docs" msgstr "기여자 문서" -#: ../../source/index.rst:151 +#: ../../source/index.rst:157 msgid "" "The Flower community welcomes contributions. The following docs are " "intended to help along the way." @@ -8923,6 +9245,10 @@ msgstr "빌드 전달인자" msgid "Optional argument" msgstr "선택적 개선 사항" +#: ../../flwr install:1 +msgid "The source FAB file to install." 
+msgstr "" + #: ../../flwr log:1 msgid "Get logs from a Flower project run." msgstr "" @@ -8931,7 +9257,7 @@ msgstr "" msgid "Flag to stream or print logs from the Flower run" msgstr "" -#: ../../flwr log +#: ../../flwr log run msgid "default" msgstr "" @@ -8945,6 +9271,19 @@ msgstr "``DISTRO``" msgid "Required argument" msgstr "빌드 전달인자" +#: ../../flwr log:1 +#, fuzzy +msgid "The Flower run ID to query" +msgstr "Flower 커뮤니티 가입하기" + +#: ../../flwr log:1 +msgid "Path of the Flower project to run" +msgstr "" + +#: ../../flwr log:1 +msgid "Name of the federation to run the app on" +msgstr "" + #: ../../flwr new:1 #, fuzzy msgid "Create new Flower App." @@ -8969,6 +9308,11 @@ msgstr "" msgid "The Flower username of the author" msgstr "" +#: ../../flwr new:1 +#, fuzzy +msgid "The name of the Flower App" +msgstr "Flower 기본 이미지의 태그." + #: ../../flwr run:1 #, fuzzy msgid "Run Flower App." @@ -8990,6 +9334,26 @@ msgid "" " the `pyproject.toml` in order to be properly overriden." msgstr "" +#: ../../flwr run:1 +msgid "" +"Use `--stream` with `flwr run` to display logs; logs are not streamed by " +"default." +msgstr "" + +#: ../../flwr run:1 +#, fuzzy +msgid "``False``" +msgstr "``flwr/base``" + +#: ../../flwr run:1 +#, fuzzy +msgid "Path of the Flower App to run." +msgstr "Flower 기본 이미지의 태그." + +#: ../../flwr run:1 +msgid "Name of the federation to run the app on." +msgstr "" + #: ../../source/ref-api-cli.rst:16 msgid "flower-simulation" msgstr "flower 시뮬레이션" @@ -9007,17 +9371,16 @@ msgstr "Flower SuperNode" msgid "flower-server-app" msgstr "flower 서버 프로그램" -#: ../../source/ref-api-cli.rst:49 +#: ../../source/ref-api-cli.rst:50 msgid "" -"Note that since version :code:`1.11.0`, :code:`flower-server-app` no " -"longer supports passing a reference to a `ServerApp` attribute. Instead, " -"you need to pass the path to Flower app via the argument :code:`--app`. " -"This is the path to a directory containing a `pyproject.toml`. 
You can " -"create a valid Flower app by executing :code:`flwr new` and following the" -" prompt." +"Note that since version ``1.11.0``, ``flower-server-app`` no longer " +"supports passing a reference to a `ServerApp` attribute. Instead, you " +"need to pass the path to Flower app via the argument ``--app``. This is " +"the path to a directory containing a `pyproject.toml`. You can create a " +"valid Flower app by executing ``flwr new`` and following the prompt." msgstr "" -#: ../../source/ref-api-cli.rst:62 +#: ../../source/ref-api-cli.rst:64 #, fuzzy msgid "flower-superexec" msgstr "flower 초연결" @@ -21680,12 +22043,15 @@ msgid "" "`_." msgstr "" -#: ../../source/ref-example-projects.rst:10 -msgid "" -"The following examples are available as standalone projects. Quickstart " -"TensorFlow/Keras ---------------------------" +#: ../../source/ref-example-projects.rst:9 +msgid "The following examples are available as standalone projects." msgstr "" +#: ../../source/ref-example-projects.rst:12 +#, fuzzy +msgid "Quickstart TensorFlow/Keras" +msgstr "빠른 시작 튜토리얼" + #: ../../source/ref-example-projects.rst:14 msgid "" "The TensorFlow/Keras quickstart example shows CIFAR-10 image " @@ -21699,77 +22065,77 @@ msgid "" "tensorflow>`_" msgstr "" -#: ../../source/ref-example-projects.rst:18 +#: ../../source/ref-example-projects.rst:19 msgid ":doc:`Quickstart TensorFlow (Tutorial) `" msgstr "" -#: ../../source/ref-example-projects.rst:19 +#: ../../source/ref-example-projects.rst:20 msgid "" "`Quickstart TensorFlow (Blog Post) `_" msgstr "" -#: ../../source/ref-example-projects.rst:23 -#: ../../source/tutorial-quickstart-pytorch.rst:5 +#: ../../source/ref-example-projects.rst:24 +#: ../../source/tutorial-quickstart-pytorch.rst:4 msgid "Quickstart PyTorch" msgstr "" -#: ../../source/ref-example-projects.rst:25 +#: ../../source/ref-example-projects.rst:26 msgid "" "The PyTorch quickstart example shows CIFAR-10 image classification with a" " simple Convolutional Neural Network:" 
msgstr "" -#: ../../source/ref-example-projects.rst:28 +#: ../../source/ref-example-projects.rst:29 msgid "" "`Quickstart PyTorch (Code) " "`_" msgstr "" -#: ../../source/ref-example-projects.rst:29 +#: ../../source/ref-example-projects.rst:31 msgid ":doc:`Quickstart PyTorch (Tutorial) `" msgstr "" -#: ../../source/ref-example-projects.rst:33 +#: ../../source/ref-example-projects.rst:34 msgid "PyTorch: From Centralized To Federated" msgstr "" -#: ../../source/ref-example-projects.rst:35 +#: ../../source/ref-example-projects.rst:36 msgid "" "This example shows how a regular PyTorch project can be federated using " "Flower:" msgstr "" -#: ../../source/ref-example-projects.rst:37 +#: ../../source/ref-example-projects.rst:38 msgid "" "`PyTorch: From Centralized To Federated (Code) " "`_" msgstr "" -#: ../../source/ref-example-projects.rst:38 +#: ../../source/ref-example-projects.rst:40 msgid "" ":doc:`PyTorch: From Centralized To Federated (Tutorial) `" msgstr "" -#: ../../source/ref-example-projects.rst:42 +#: ../../source/ref-example-projects.rst:44 msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" msgstr "" -#: ../../source/ref-example-projects.rst:44 +#: ../../source/ref-example-projects.rst:46 msgid "" "This example shows how Flower can be used to build a federated learning " "system that run across Raspberry Pi and Nvidia Jetson:" msgstr "" -#: ../../source/ref-example-projects.rst:46 +#: ../../source/ref-example-projects.rst:49 msgid "" "`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " "`_" msgstr "" -#: ../../source/ref-example-projects.rst:47 +#: ../../source/ref-example-projects.rst:51 msgid "" "`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " "`_" @@ -21785,20 +22151,20 @@ msgstr "" msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" msgstr "" -#: ../../source/ref-faq.rst:8 +#: ../../source/ref-faq.rst:9 msgid "" "Yes, it can! 
Flower even comes with a few under-the-hood optimizations to" " make it work even better on Colab. Here's a quickstart example:" msgstr "" -#: ../../source/ref-faq.rst:10 +#: ../../source/ref-faq.rst:11 msgid "" "`Flower simulation PyTorch " "`_" msgstr "" -#: ../../source/ref-faq.rst:11 +#: ../../source/ref-faq.rst:12 msgid "" "`Flower simulation TensorFlow/Keras " "`_" @@ -21821,20 +22187,20 @@ msgstr "" msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" msgstr "" -#: ../../source/ref-faq.rst:19 +#: ../../source/ref-faq.rst:20 msgid "" "Yes, it does. Please take a look at our `blog post " "`_ or check out the code examples:" msgstr "" -#: ../../source/ref-faq.rst:21 +#: ../../source/ref-faq.rst:22 msgid "" "`Android Kotlin example `_" msgstr "" -#: ../../source/ref-faq.rst:22 +#: ../../source/ref-faq.rst:23 msgid "`Android Java example `_" msgstr "" @@ -21842,50 +22208,50 @@ msgstr "" msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" msgstr "" -#: ../../source/ref-faq.rst:26 +#: ../../source/ref-faq.rst:27 msgid "" "Yes, of course. A list of available examples using Flower within a " "blockchain environment is available here:" msgstr "" -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-faq.rst:30 msgid "`FLock: A Decentralised AI Training Platform `_." msgstr "" -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-faq.rst:30 msgid "Contribute to on-chain training the model and earn rewards." msgstr "" -#: ../../source/ref-faq.rst:30 +#: ../../source/ref-faq.rst:31 msgid "Local blockchain with federated learning simulation." msgstr "" -#: ../../source/ref-faq.rst:31 +#: ../../source/ref-faq.rst:32 msgid "" "`Flower meets Nevermined GitHub Repository `_." msgstr "" -#: ../../source/ref-faq.rst:32 +#: ../../source/ref-faq.rst:33 msgid "" "`Flower meets Nevermined YouTube video " "`_." msgstr "" -#: ../../source/ref-faq.rst:33 +#: ../../source/ref-faq.rst:34 msgid "" "`Flower meets KOSMoS `_." 
msgstr "" -#: ../../source/ref-faq.rst:34 +#: ../../source/ref-faq.rst:35 msgid "" "`Flower meets Talan blog post `_ ." msgstr "" -#: ../../source/ref-faq.rst:35 +#: ../../source/ref-faq.rst:36 msgid "" "`Flower meets Talan GitHub Repository " "`_ ." @@ -22091,28 +22457,28 @@ msgid "" "app using Flower." msgstr "" -#: ../../source/tutorial-quickstart-android.rst:5 +#: ../../source/tutorial-quickstart-android.rst:4 msgid "Quickstart Android" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:10 +#: ../../source/tutorial-quickstart-android.rst:9 msgid "" "Let's build a federated learning system using TFLite and Flower on " "Android!" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:12 +#: ../../source/tutorial-quickstart-android.rst:11 msgid "" "Please refer to the `full code example " "`_ to learn " "more." msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:5 +#: ../../source/tutorial-quickstart-fastai.rst:4 msgid "Quickstart fastai" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:7 +#: ../../source/tutorial-quickstart-fastai.rst:6 msgid "" "In this federated learning tutorial we will learn how to train a " "SqueezeNet model on MNIST using Flower and fastai. It is recommended to " @@ -22120,24 +22486,24 @@ msgid "" " `." 
msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:12 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +#: ../../source/tutorial-quickstart-fastai.rst:10 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:11 msgid "Then, clone the code example directly from GitHub:" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:20 +#: ../../source/tutorial-quickstart-fastai.rst:18 msgid "" "This will create a new directory called `quickstart-fastai` containing " "the following files:" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:33 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:33 +#: ../../source/tutorial-quickstart-fastai.rst:31 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:32 #, fuzzy msgid "Next, activate your environment, then run:" msgstr "그 후 가상 환경을 활성화합니다:" -#: ../../source/tutorial-quickstart-fastai.rst:43 +#: ../../source/tutorial-quickstart-fastai.rst:41 msgid "" "This example by default runs the Flower Simulation Engine, creating a " "federation of 10 nodes using `FedAvg `_ of this tutorial in ``examples/quickstart-fasai`` " @@ -22180,11 +22546,11 @@ msgid "" "with 🤗 HuggingFace Transformers in order to fine-tune an LLM." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:5 +#: ../../source/tutorial-quickstart-huggingface.rst:4 msgid "Quickstart 🤗 Transformers" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:7 +#: ../../source/tutorial-quickstart-huggingface.rst:6 msgid "" "In this federated learning tutorial we will learn how to train a large " "language model (LLM) on the `IMDB " @@ -22194,7 +22560,7 @@ msgid "" "`." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:14 +#: ../../source/tutorial-quickstart-huggingface.rst:12 msgid "" "Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " "project. It will generate all the files needed to run, by default with " @@ -22203,74 +22569,74 @@ msgid "" "|iidpartitioner|_." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:20 -#: ../../source/tutorial-quickstart-mlx.rst:19 -#: ../../source/tutorial-quickstart-pytorch.rst:19 -#: ../../source/tutorial-quickstart-tensorflow.rst:20 +#: ../../source/tutorial-quickstart-huggingface.rst:17 +#: ../../source/tutorial-quickstart-mlx.rst:17 +#: ../../source/tutorial-quickstart-pytorch.rst:18 +#: ../../source/tutorial-quickstart-tensorflow.rst:18 msgid "" "Now that we have a rough idea of what this example is about, let's get " "started. First, install Flower in your new environment:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:28 +#: ../../source/tutorial-quickstart-huggingface.rst:25 msgid "" "Then, run the command below. You will be prompted to select one of the " "available templates (choose ``HuggingFace``), give a name to your " "project, and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:36 -#: ../../source/tutorial-quickstart-mlx.rst:35 -#: ../../source/tutorial-quickstart-pytorch.rst:35 -#: ../../source/tutorial-quickstart-tensorflow.rst:36 +#: ../../source/tutorial-quickstart-huggingface.rst:33 +#: ../../source/tutorial-quickstart-mlx.rst:32 +#: ../../source/tutorial-quickstart-pytorch.rst:34 +#: ../../source/tutorial-quickstart-tensorflow.rst:34 msgid "" "After running it you'll notice a new directory with your project name has" " been created. 
It should have the following structure:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:50 -#: ../../source/tutorial-quickstart-mlx.rst:49 -#: ../../source/tutorial-quickstart-pytorch.rst:49 -#: ../../source/tutorial-quickstart-tensorflow.rst:50 +#: ../../source/tutorial-quickstart-huggingface.rst:47 +#: ../../source/tutorial-quickstart-mlx.rst:46 +#: ../../source/tutorial-quickstart-pytorch.rst:48 +#: ../../source/tutorial-quickstart-tensorflow.rst:48 msgid "" "If you haven't yet installed the project and its dependencies, you can do" " so by:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:58 -#: ../../source/tutorial-quickstart-pytorch.rst:57 -#: ../../source/tutorial-quickstart-tensorflow.rst:58 +#: ../../source/tutorial-quickstart-huggingface.rst:54 +#: ../../source/tutorial-quickstart-pytorch.rst:55 +#: ../../source/tutorial-quickstart-tensorflow.rst:55 msgid "To run the project, do:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:106 +#: ../../source/tutorial-quickstart-huggingface.rst:102 msgid "You can also run the project with GPU as follows:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:113 +#: ../../source/tutorial-quickstart-huggingface.rst:109 msgid "" "This will use the default arguments where each ``ClientApp`` will use 2 " "CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:124 -#: ../../source/tutorial-quickstart-mlx.rst:114 -#: ../../source/tutorial-quickstart-pytorch.rst:113 +#: ../../source/tutorial-quickstart-huggingface.rst:120 +#: ../../source/tutorial-quickstart-mlx.rst:110 +#: ../../source/tutorial-quickstart-pytorch.rst:111 msgid "" "What follows is an explanation of each component in the project you just " "created: dataset partition, the model, defining the ``ClientApp`` and " "defining the ``ServerApp``." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:130 -#: ../../source/tutorial-quickstart-mlx.rst:120 -#: ../../source/tutorial-quickstart-pytorch.rst:119 -#: ../../source/tutorial-quickstart-tensorflow.rst:116 +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/tutorial-quickstart-tensorflow.rst:112 #, fuzzy msgid "The Data" msgstr "Metadata" -#: ../../source/tutorial-quickstart-huggingface.rst:132 +#: ../../source/tutorial-quickstart-huggingface.rst:126 msgid "" "This tutorial uses |flowerdatasets|_ to easily download and partition the" " `IMDB `_ dataset. In " @@ -22283,14 +22649,14 @@ msgid "" "their data partition." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:178 -#: ../../source/tutorial-quickstart-mlx.rst:164 -#: ../../source/tutorial-quickstart-pytorch.rst:157 -#: ../../source/tutorial-quickstart-tensorflow.rst:145 +#: ../../source/tutorial-quickstart-huggingface.rst:171 +#: ../../source/tutorial-quickstart-mlx.rst:155 +#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/tutorial-quickstart-tensorflow.rst:139 msgid "The Model" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:180 +#: ../../source/tutorial-quickstart-huggingface.rst:173 msgid "" "We will leverage 🤗 Hugging Face to federate the training of language " "models over multiple clients using Flower. More specifically, we will " @@ -22300,13 +22666,13 @@ msgid "" "larger GPUs, feel free to use larger models!" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:193 +#: ../../source/tutorial-quickstart-huggingface.rst:185 msgid "" "Note that here, ``model_name`` is a string that will be loaded from the " "``Context`` in the ClientApp and ServerApp." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:196 +#: ../../source/tutorial-quickstart-huggingface.rst:188 msgid "" "In addition to loading the pretrained model weights and architecture, we " "also include two utility functions to perform both training (i.e. " @@ -22319,15 +22685,15 @@ msgid "" "perform local training or evaluation:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:239 -#: ../../source/tutorial-quickstart-mlx.rst:210 -#: ../../source/tutorial-quickstart-pytorch.rst:234 -#: ../../source/tutorial-quickstart-tensorflow.rst:176 +#: ../../source/tutorial-quickstart-huggingface.rst:228 +#: ../../source/tutorial-quickstart-mlx.rst:199 +#: ../../source/tutorial-quickstart-pytorch.rst:224 +#: ../../source/tutorial-quickstart-tensorflow.rst:168 #, fuzzy msgid "The ClientApp" msgstr "클라이언트앱" -#: ../../source/tutorial-quickstart-huggingface.rst:241 +#: ../../source/tutorial-quickstart-huggingface.rst:230 msgid "" "The main changes we have to make to use 🤗 Hugging Face with Flower will " "be found in the ``get_weights()`` and ``set_weights()`` functions. Under " @@ -22340,8 +22706,8 @@ msgid "" "them to an existing PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:254 -#: ../../source/tutorial-quickstart-pytorch.rst:245 +#: ../../source/tutorial-quickstart-huggingface.rst:241 +#: ../../source/tutorial-quickstart-pytorch.rst:234 msgid "" "The specific implementation of ``get_weights()`` and ``set_weights()`` " "depends on the type of models you use. The ones shown below work for a " @@ -22349,8 +22715,8 @@ msgid "" "have more exotic model architectures." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:269 -#: ../../source/tutorial-quickstart-pytorch.rst:261 +#: ../../source/tutorial-quickstart-huggingface.rst:257 +#: ../../source/tutorial-quickstart-pytorch.rst:250 msgid "" "The rest of the functionality is directly inspired by the centralized " "case. The ``fit()`` method in the client trains the model using the local" @@ -22358,7 +22724,7 @@ msgid "" "model received on a held-out validation set that the client might have:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:296 +#: ../../source/tutorial-quickstart-huggingface.rst:283 msgid "" "Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " "defined above by means of a ``client_fn()`` callback. Note that the " @@ -22369,15 +22735,15 @@ msgid "" "additional hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:330 -#: ../../source/tutorial-quickstart-mlx.rst:376 -#: ../../source/tutorial-quickstart-pytorch.rst:321 -#: ../../source/tutorial-quickstart-tensorflow.rst:245 +#: ../../source/tutorial-quickstart-huggingface.rst:316 +#: ../../source/tutorial-quickstart-mlx.rst:361 +#: ../../source/tutorial-quickstart-pytorch.rst:307 +#: ../../source/tutorial-quickstart-tensorflow.rst:232 #, fuzzy msgid "The ServerApp" msgstr "Flower 서버앱" -#: ../../source/tutorial-quickstart-huggingface.rst:332 +#: ../../source/tutorial-quickstart-huggingface.rst:318 msgid "" "To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" " identical signature to that of ``client_fn()`` but the return type is " @@ -22388,13 +22754,13 @@ msgid "" "value defined in the ``pyproject.toml``." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:371 +#: ../../source/tutorial-quickstart-huggingface.rst:356 msgid "" "Congratulations! You've successfully built and run your first federated " "learning system for an LLM." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:376 +#: ../../source/tutorial-quickstart-huggingface.rst:361 msgid "" "Check the source code of the extended version of this tutorial in " "|quickstart_hf_link|_ in the Flower GitHub repository. For a " @@ -22408,11 +22774,11 @@ msgid "" "using Flower to train a neural network on MNIST." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:5 +#: ../../source/tutorial-quickstart-ios.rst:4 msgid "Quickstart iOS" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:10 +#: ../../source/tutorial-quickstart-ios.rst:9 msgid "" "In this tutorial we will learn how to train a Neural Network on MNIST " "using Flower and CoreML on iOS devices." @@ -22426,13 +22792,13 @@ msgid "" "implementation in iOS, it is recommended to use Xcode as our IDE." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:15 +#: ../../source/tutorial-quickstart-ios.rst:17 msgid "" "Our example consists of one Python *server* and two iPhone *clients* that" " all have the same model." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:17 +#: ../../source/tutorial-quickstart-ios.rst:20 msgid "" "*Clients* are responsible for generating individual weight updates for " "the model based on their local datasets. These updates are then sent to " @@ -22441,24 +22807,24 @@ msgid "" "each *client*. A complete cycle of weight updates is called a *round*." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:21 +#: ../../source/tutorial-quickstart-ios.rst:26 msgid "" "Now that we have a rough idea of what is going on, let's get started to " "setup our Flower server environment. We first need to install Flower. 
You" " can do this by using pip:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:27 +#: ../../source/tutorial-quickstart-ios.rst:33 msgid "Or Poetry:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-xgboost.rst:55 +#: ../../source/tutorial-quickstart-ios.rst:40 +#: ../../source/tutorial-quickstart-scikitlearn.rst:43 +#: ../../source/tutorial-quickstart-xgboost.rst:65 msgid "Flower Client" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:36 +#: ../../source/tutorial-quickstart-ios.rst:42 msgid "" "Now that we have all our dependencies installed, let's run a simple " "distributed training using CoreML as our local training pipeline and " @@ -22467,91 +22833,90 @@ msgid "" "the Swift SDK. The client implementation can be seen below:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:72 +#: ../../source/tutorial-quickstart-ios.rst:80 msgid "" -"Let's create a new application project in Xcode and add :code:`flwr` as a" -" dependency in your project. For our application, we will store the logic" -" of our app in :code:`FLiOSModel.swift` and the UI elements in " -":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" -" in this quickstart. Please refer to the `full code example " +"Let's create a new application project in Xcode and add ``flwr`` as a " +"dependency in your project. For our application, we will store the logic " +"of our app in ``FLiOSModel.swift`` and the UI elements in " +"``ContentView.swift``. We will focus more on ``FLiOSModel.swift`` in this" +" quickstart. Please refer to the `full code example " "`_ to learn more " "about the app." 
msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:75 -msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +#: ../../source/tutorial-quickstart-ios.rst:86 +msgid "Import Flower and CoreML related packages in ``FLiOSModel.swift``:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:83 +#: ../../source/tutorial-quickstart-ios.rst:94 msgid "" "Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " "will be bundled inside the application during deployment to your iOS " "device. We need to pass the url to access mlmodel and run CoreML machine " "learning processes, it can be retrieved by calling the function " -":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " -"into :code:`MLBatchProvider` object. The preprocessing is done inside " -":code:`DataLoader.swift`." +"``Bundle.main.url``. For the MNIST dataset, we need to preprocess it into" +" ``MLBatchProvider`` object. The preprocessing is done inside " +"``DataLoader.swift``." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:99 +#: ../../source/tutorial-quickstart-ios.rst:112 msgid "" "Since CoreML does not allow the model parameters to be seen before " "training, and accessing the model parameters during or after the training" " can only be done by specifying the layer name, we need to know this " "information beforehand, through looking at the model specification, which" " are written as proto files. The implementation can be seen in " -":code:`MLModelInspect`." +"``MLModelInspect``." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:102 +#: ../../source/tutorial-quickstart-ios.rst:118 msgid "" "After we have all of the necessary information, let's create our Flower " "client." 
msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:117 +#: ../../source/tutorial-quickstart-ios.rst:133 msgid "" "Then start the Flower gRPC client and start communicating to the server " -"by passing our Flower client to the function :code:`startFlwrGRPC`." +"by passing our Flower client to the function ``startFlwrGRPC``." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:124 +#: ../../source/tutorial-quickstart-ios.rst:141 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " -"The attribute :code:`hostname` and :code:`port` tells the client which " -"server to connect to. This can be done by entering the hostname and port " -"in the application before clicking the start button to start the " -"federated learning process." +"That's it for the client. We only have to implement ``Client`` or call " +"the provided ``MLFlwrClient`` and call ``startFlwrGRPC()``. The attribute" +" ``hostname`` and ``port`` tells the client which server to connect to. " +"This can be done by entering the hostname and port in the application " +"before clicking the start button to start the federated learning process." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-scikitlearn.rst:167 -#: ../../source/tutorial-quickstart-xgboost.rst:341 +#: ../../source/tutorial-quickstart-ios.rst:148 +#: ../../source/tutorial-quickstart-scikitlearn.rst:179 +#: ../../source/tutorial-quickstart-xgboost.rst:358 msgid "Flower Server" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:131 +#: ../../source/tutorial-quickstart-ios.rst:150 msgid "" "For simple workloads we can start a Flower server and leave all the " "configuration possibilities at their default values. 
In a file named " -":code:`server.py`, import Flower and start the server:" +"``server.py``, import Flower and start the server:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:142 -#: ../../source/tutorial-quickstart-scikitlearn.rst:230 +#: ../../source/tutorial-quickstart-ios.rst:161 +#: ../../source/tutorial-quickstart-scikitlearn.rst:254 msgid "Train the model, federated!" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:144 -#: ../../source/tutorial-quickstart-xgboost.rst:567 +#: ../../source/tutorial-quickstart-ios.rst:163 +#: ../../source/tutorial-quickstart-xgboost.rst:590 msgid "" "With both client and server ready, we can now run everything and see " "federated learning in action. FL systems usually have a server and " "multiple clients. We therefore have to start the server first:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:152 +#: ../../source/tutorial-quickstart-ios.rst:171 msgid "" "Once the server is running we can start the clients in different " "terminals. Build and run the client through your Xcode, one through Xcode" @@ -22561,12 +22926,12 @@ msgid "" "simulator-or-on-a-device>`_." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:156 +#: ../../source/tutorial-quickstart-ios.rst:177 msgid "" "Congratulations! You've successfully built and run your first federated " "learning system in your ios device. The full `source code " "`_ for this " -"example can be found in :code:`examples/ios`." +"example can be found in ``examples/ios``." msgstr "" #: ../../source/tutorial-quickstart-jax.rst:-1 @@ -22575,11 +22940,11 @@ msgid "" "with Jax to train a linear regression model on a scikit-learn dataset." 
msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:5 +#: ../../source/tutorial-quickstart-jax.rst:4 msgid "Quickstart JAX" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:10 +#: ../../source/tutorial-quickstart-jax.rst:9 msgid "" "This tutorial will show you how to use Flower to build a federated " "version of an existing JAX workload. We are using JAX to train a linear " @@ -22600,38 +22965,41 @@ msgstr "" " 튜토리얼`을 기반으로 centralized 학습 접근 방식을 구축합니다. 그런 다음 centralized 트레이닝 코드를 기반으로" " federated 방식으로 트레이닝을 실행합니다." -#: ../../source/tutorial-quickstart-jax.rst:16 +#: ../../source/tutorial-quickstart-jax.rst:20 +#, fuzzy msgid "" "Before we start building our JAX example, we need install the packages " -":code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:" +"``jax``, ``jaxlib``, ``scikit-learn``, and ``flwr``:" msgstr "" "JAX 예제 빌드를 시작하기 전에 :code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, " ":code:`flwr` 패키지를 설치해야 합니다:" -#: ../../source/tutorial-quickstart-jax.rst:24 +#: ../../source/tutorial-quickstart-jax.rst:28 msgid "Linear Regression with JAX" msgstr "JAX를 사용한 선형 회귀" -#: ../../source/tutorial-quickstart-jax.rst:26 +#: ../../source/tutorial-quickstart-jax.rst:30 +#, fuzzy msgid "" "We begin with a brief description of the centralized training code based " -"on a :code:`Linear Regression` model. If you want a more in-depth " -"explanation of what's going on then have a look at the official `JAX " -"documentation `_." +"on a ``Linear Regression`` model. If you want a more in-depth explanation" +" of what's going on then have a look at the official `JAX documentation " +"`_." msgstr "" "먼저 :code:`선형 회귀` 모델을 기반으로 하는 중앙 집중식 훈련 코드에 대한 간략한 설명부터 시작하겠습니다. 더 자세한 설명을" " 원하시면 공식 `JAX 문서 `_를 참조하세요." 
-#: ../../source/tutorial-quickstart-jax.rst:29 +#: ../../source/tutorial-quickstart-jax.rst:34 +#, fuzzy msgid "" -"Let's create a new file called :code:`jax_training.py` with all the " +"Let's create a new file called ``jax_training.py`` with all the " "components required for a traditional (centralized) linear regression " -"training. First, the JAX packages :code:`jax` and :code:`jaxlib` need to " -"be imported. In addition, we need to import :code:`sklearn` since we use " -":code:`make_regression` for the dataset and :code:`train_test_split` to " -"split the dataset into a training and test set. You can see that we do " -"not yet import the :code:`flwr` package for federated learning. This will" -" be done later." +"training. First, the JAX packages ``jax`` and ``jaxlib`` need to be " +"imported. In addition, we need to import ``sklearn`` since we use " +"``make_regression`` for the dataset and ``train_test_split`` to split the" +" dataset into a training and test set. You can see that we do not yet " +"import the ``flwr`` package for federated learning. This will be done " +"later." msgstr "" "전통적인(중앙 집중식) 선형 회귀 훈련에 필요한 모든 구성 요소가 포함된 :code:`jax_training.py`라는 새 파일을 " "생성해 보겠습니다. 먼저, JAX 패키지인 :code:`jax`와 :code:`jaxlib`를 가져와야 합니다. 또한 데이터 세트에" @@ -22639,55 +23007,58 @@ msgstr "" ":code:`train_test_split`을 사용하므로 :code:`sklearn`을 가져와야 합니다. 연합 학습을 위해 아직 " ":code:`flwr` 패키지를 가져오지 않은 것을 볼 수 있습니다. 이 작업은 나중에 수행됩니다." -#: ../../source/tutorial-quickstart-jax.rst:43 -msgid "" -"The :code:`load_data()` function loads the mentioned training and test " -"sets." +#: ../../source/tutorial-quickstart-jax.rst:51 +#, fuzzy +msgid "The ``load_data()`` function loads the mentioned training and test sets." msgstr "code:`load_data()` 함수는 앞서 언급한 트레이닝 및 테스트 세트를 로드합니다." 
-#: ../../source/tutorial-quickstart-jax.rst:53 +#: ../../source/tutorial-quickstart-jax.rst:63 +#, fuzzy msgid "" -"The model architecture (a very simple :code:`Linear Regression` model) is" -" defined in :code:`load_model()`." +"The model architecture (a very simple ``Linear Regression`` model) is " +"defined in ``load_model()``." msgstr "모델 아키텍처(매우 간단한 :code:`선형 회귀` 모델)는 :code:`load_model()`에 정의되어 있습니다." -#: ../../source/tutorial-quickstart-jax.rst:65 +#: ../../source/tutorial-quickstart-jax.rst:73 +#, fuzzy msgid "" -"We now need to define the training (function :code:`train()`), which " -"loops over the training set and measures the loss (function " -":code:`loss_fn()`) for each batch of training examples. The loss function" -" is separate since JAX takes derivatives with a :code:`grad()` function " -"(defined in the :code:`main()` function and called in :code:`train()`)." +"We now need to define the training (function ``train()``), which loops " +"over the training set and measures the loss (function ``loss_fn()``) for " +"each batch of training examples. The loss function is separate since JAX " +"takes derivatives with a ``grad()`` function (defined in the ``main()`` " +"function and called in ``train()``)." msgstr "" "이제 훈련 집합을 반복하고 각 훈련 예제 배치에 대해 손실을 측정하는(함수 :code:`loss_fn()`) 훈련(함수 " ":code:`train()`)을 정의해야 합니다. JAX는 :code:`grad()` 함수(:code:`main()` 함수에 " "정의되고 :code:`train()`에서 호출됨)로 파생물을 취하므로 손실 함수는 분리되어 있습니다." -#: ../../source/tutorial-quickstart-jax.rst:83 +#: ../../source/tutorial-quickstart-jax.rst:95 +#, fuzzy msgid "" -"The evaluation of the model is defined in the function " -":code:`evaluation()`. The function takes all test examples and measures " -"the loss of the linear regression model." +"The evaluation of the model is defined in the function ``evaluation()``. " +"The function takes all test examples and measures the loss of the linear " +"regression model." msgstr "" "모델의 평가는 :code:`evaluation()` 함수에 정의되어 있습니다. 
이 함수는 모든 테스트 예제를 가져와 선형 회귀 " "모델의 손실을 측정합니다." -#: ../../source/tutorial-quickstart-jax.rst:94 +#: ../../source/tutorial-quickstart-jax.rst:107 +#, fuzzy msgid "" "Having defined the data loading, model architecture, training, and " "evaluation we can put everything together and train our model using JAX. " -"As already mentioned, the :code:`jax.grad()` function is defined in " -":code:`main()` and passed to :code:`train()`." +"As already mentioned, the ``jax.grad()`` function is defined in " +"``main()`` and passed to ``train()``." msgstr "" "데이터 로딩, 모델 아키텍처, 훈련 및 평가를 정의했으므로 이제 모든 것을 종합하여 JAX를 사용 모델을 훈련할 수 있습니다. 이미" " 언급했듯이 :code:`jax.grad()` 함수는 :code:`main()`에 정의되어 :code:`train()`에 " "전달됩니다." -#: ../../source/tutorial-quickstart-jax.rst:111 +#: ../../source/tutorial-quickstart-jax.rst:126 msgid "You can now run your (centralized) JAX linear regression workload:" msgstr "이제 (중앙 집중식) JAX 선형 회귀 워크로드를 실행할 수 있습니다:" -#: ../../source/tutorial-quickstart-jax.rst:117 +#: ../../source/tutorial-quickstart-jax.rst:132 msgid "" "So far this should all look fairly familiar if you've used JAX before. " "Let's take the next step and use what we've built to create a simple " @@ -22696,49 +23067,51 @@ msgstr "" "지금까지는 JAX를 사용해 본 적이 있다면 이 모든 것이 상당히 익숙해 보일 것입니다. 다음 단계로 넘어가서 우리가 구축한 것을 " "사용하여 하나의 서버와 두 개의 클라이언트로 구성된 간단한 연합 학습 시스템을 만들어 보겠습니다." -#: ../../source/tutorial-quickstart-jax.rst:121 +#: ../../source/tutorial-quickstart-jax.rst:137 msgid "JAX meets Flower" msgstr "JAX와 Flower의 만남" -#: ../../source/tutorial-quickstart-jax.rst:123 +#: ../../source/tutorial-quickstart-jax.rst:139 +#, fuzzy msgid "" "The concept of federating an existing workload is always the same and " "easy to understand. We have to start a *server* and then use the code in " -":code:`jax_training.py` for the *clients* that are connected to the " -"*server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. 
The updated " -"parameters are sent back to the *server*, which averages all received " -"parameter updates. This describes one round of the federated learning " -"process, and we repeat this for multiple rounds." +"``jax_training.py`` for the *clients* that are connected to the *server*." +" The *server* sends model parameters to the clients. The *clients* run " +"the training and update the parameters. The updated parameters are sent " +"back to the *server*, which averages all received parameter updates. This" +" describes one round of the federated learning process, and we repeat " +"this for multiple rounds." msgstr "" "기존 워크로드를 연합하는 개념은 항상 동일하고 이해하기 쉽습니다. 서버*를 시작한 다음 *서버*에 연결된 *클라이언트*에 대해 " ":code:`jax_training.py`의 코드를 사용해야 합니다. *서버*는 모델 파라미터를 클라이언트로 전송합니다. " "클라이언트는 학습을 실행하고 파라미터를 업데이트합니다. 업데이트된 파라미터는 *서버*로 다시 전송되며, 수신된 모든 파라미터 " "업데이트의 평균을 구합니다. 이는 연합 학습 프로세스의 한 라운드를 설명하며, 이 과정을 여러 라운드에 걸쳐 반복합니다." -#: ../../source/tutorial-quickstart-jax.rst:145 +#: ../../source/tutorial-quickstart-jax.rst:167 +#, fuzzy msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined JAX training in :code:`jax_training.py`. Our" -" *client* needs to import :code:`flwr`, but also :code:`jax` and " -":code:`jaxlib` to update the parameters on our JAX model:" +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined JAX training in ``jax_training.py``. Our " +"*client* needs to import ``flwr``, but also ``jax`` and ``jaxlib`` to " +"update the parameters on our JAX model:" msgstr "" "마지막으로, :code:`client.py`에서 *client* 로직을 정의하고 :code:`jax_training.py`에서 " "이전에 정의한 JAX 교육을 기반으로 빌드합니다. 
*클라이언트*는 :code:`flwr`을 가져와야 하며, JAX 모델의 파라미터를" " 업데이트하기 위해 :code:`jax` 및 :code:`jaxlib`도 가져와야 합니다:" -#: ../../source/tutorial-quickstart-jax.rst:160 +#: ../../source/tutorial-quickstart-jax.rst:182 +#, fuzzy msgid "" "Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`FlowerClient`. :code:`NumPyClient` is slightly " -"easier to implement than :code:`Client` if you use a framework with good " -"NumPy interoperability (like JAX) because it avoids some of the " -"boilerplate that would otherwise be necessary. :code:`FlowerClient` needs" -" to implement four methods, two methods for getting/setting model " -"parameters, one method for training the model, and one method for testing" -" the model:" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``FlowerClient``. ``NumPyClient`` is slightly easier to implement" +" than ``Client`` if you use a framework with good NumPy interoperability " +"(like JAX) because it avoids some of the boilerplate that would otherwise" +" be necessary. ``FlowerClient`` needs to implement four methods, two " +"methods for getting/setting model parameters, one method for training the" +" model, and one method for testing the model:" msgstr "" "Flower *클라이언트*를 구현한다는 것은 기본적으로 :code:`flwr.client.Client` 또는 " ":code:`flwr.client.NumPyClient`의 서브클래스를 구현하는 것을 의미합니다. 
구현은 " @@ -22748,40 +23121,43 @@ msgstr "" "code:`FlowerClient`는 모델 매개변수를 가져오거나 설정하는 메서드 2개, 모델 학습을 위한 메서드 1개, 모델 " "테스트를 위한 메서드 1개 등 총 4개의 메서드를 구현해야 합니다:" -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid ":code:`set_parameters (optional)`" +#: ../../source/tutorial-quickstart-jax.rst:194 +#, fuzzy +msgid "``set_parameters (optional)``" msgstr ":code:`set_parameters (선택사항)`" -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid "transform parameters to NumPy :code:`ndarray`'s" +#: ../../source/tutorial-quickstart-jax.rst:193 +#, fuzzy +msgid "transform parameters to NumPy ``ndarray``'s" msgstr "매개 변수를 NumPy :code:`ndarray`로 변환" -#: ../../source/tutorial-quickstart-jax.rst:174 +#: ../../source/tutorial-quickstart-jax.rst:203 msgid "get the updated local model parameters and return them to the server" msgstr "업데이트된 로컬 모델 파라미터를 가져와 서버로 반환합니다" -#: ../../source/tutorial-quickstart-jax.rst:178 +#: ../../source/tutorial-quickstart-jax.rst:208 msgid "return the local loss to the server" msgstr "로컬 손실을 서버로 반환합니다" -#: ../../source/tutorial-quickstart-jax.rst:180 +#: ../../source/tutorial-quickstart-jax.rst:210 +#, fuzzy msgid "" "The challenging part is to transform the JAX model parameters from " -":code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with" -" `NumPyClient`." +"``DeviceArray`` to ``NumPy ndarray`` to make them compatible with " +"`NumPyClient`." msgstr "" "어려운 부분은 JAX 모델 매개변수를 :code:`DeviceArray`에서 :code:`NumPy ndarray`로 변환하여 " "`NumPyClient`와 호환되도록 하는 것입니다." -#: ../../source/tutorial-quickstart-jax.rst:182 +#: ../../source/tutorial-quickstart-jax.rst:213 +#, fuzzy msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`evaluate()` previously " -"defined in :code:`jax_training.py`. So what we really do here is we tell " -"Flower through our :code:`NumPyClient` subclass which of our already " -"defined functions to call for training and evaluation. 
We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``evaluate()`` previously defined in " +"``jax_training.py``. So what we really do here is we tell Flower through " +"our ``NumPyClient`` subclass which of our already defined functions to " +"call for training and evaluation. We included type annotations to give " +"you a better understanding of the data types that get passed around." msgstr "" "두 개의 :code:`NumPyClient` 메서드인 :code:`fit`과 :code:`evaluate`는 이전에 " ":code:`jax_training.py`에 정의된 함수 :code:`train()`과 :code:`evaluate()`를 " @@ -22789,11 +23165,11 @@ msgstr "" ":code:`NumPyClient` 서브클래스를 통해 Flower에게 알려주는 것입니다. 전달되는 데이터 유형을 더 잘 이해할 수 " "있도록 유형 type annotation을 포함했습니다." -#: ../../source/tutorial-quickstart-jax.rst:251 +#: ../../source/tutorial-quickstart-jax.rst:286 msgid "Having defined the federation process, we can run it." msgstr "연합 프로세스를 정의했으면 이제 실행할 수 있습니다." -#: ../../source/tutorial-quickstart-jax.rst:280 +#: ../../source/tutorial-quickstart-jax.rst:315 msgid "" "in each window (make sure that the server is still running before you do " "so) and see your JAX project run federated learning across two clients. " @@ -22802,7 +23178,7 @@ msgstr "" "를 입력하고(그 전에 서버가 계속 실행 중인지 확인하세요) 두 클라이언트에서 연합 학습을 실행하는 JAX 프로젝트를 확인합니다. " "축하합니다!" -#: ../../source/tutorial-quickstart-jax.rst:285 +#: ../../source/tutorial-quickstart-jax.rst:321 msgid "" "The source code of this example was improved over time and can be found " "here: `Quickstart JAX `_. 두 " "클라이언트가 동일한 데이터 세트를 로드하기 때문에 이 예제는 다소 단순화되어 있습니다." -#: ../../source/tutorial-quickstart-jax.rst:288 +#: ../../source/tutorial-quickstart-jax.rst:325 msgid "" "You're now prepared to explore this topic further. How about using a more" " sophisticated model or using a different dataset? 
How about adding more " @@ -22822,12 +23198,12 @@ msgstr "" "이제 이 주제를 더 자세히 살펴볼 준비가 되었습니다. 더 정교한 모델을 사용하거나 다른 데이터 집합을 사용해 보는 것은 어떨까요? " "클라이언트를 더 추가하는 것은 어떨까요?" -#: ../../source/tutorial-quickstart-mlx.rst:5 +#: ../../source/tutorial-quickstart-mlx.rst:4 #, fuzzy msgid "Quickstart MLX" msgstr "빠른 시작" -#: ../../source/tutorial-quickstart-mlx.rst:7 +#: ../../source/tutorial-quickstart-mlx.rst:6 msgid "" "In this federated learning tutorial we will learn how to train simple MLP" " on MNIST using Flower and MLX. It is recommended to create a virtual " @@ -22835,7 +23211,7 @@ msgid "" "how-to-set-up-a-virtual-env>`." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:12 +#: ../../source/tutorial-quickstart-mlx.rst:10 msgid "" "Let's use `flwr new` to create a complete Flower+MLX project. It will " "generate all the files needed to run, by default with the Simulation " @@ -22847,24 +23223,24 @@ msgid "" "api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner>`_." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:27 +#: ../../source/tutorial-quickstart-mlx.rst:25 msgid "" "Then, run the command below. You will be prompted to select of the " "available templates (choose ``MLX``), give a name to your project, and " "type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:57 +#: ../../source/tutorial-quickstart-mlx.rst:53 msgid "To run the project do:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:106 +#: ../../source/tutorial-quickstart-mlx.rst:102 msgid "" "You can also override the parameters defined in " "``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:122 +#: ../../source/tutorial-quickstart-mlx.rst:116 msgid "" "We will use `Flower Datasets `_ to " "easily download and partition the `MNIST` dataset. 
In this example you'll" @@ -22875,20 +23251,20 @@ msgid "" "api/flwr_datasets.partitioner.html>`_ available in Flower Datasets:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:166 +#: ../../source/tutorial-quickstart-mlx.rst:157 msgid "" "We define the model as in the `centralized MLX example " "`_, it's a " "simple MLP:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:190 +#: ../../source/tutorial-quickstart-mlx.rst:180 msgid "" "We also define some utility functions to test our model and to iterate " "over batches." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:212 +#: ../../source/tutorial-quickstart-mlx.rst:201 msgid "" "The main changes we have to make to use `MLX` with `Flower` will be found" " in the ``get_params()`` and ``set_params()`` functions. Indeed, MLX " @@ -22897,17 +23273,17 @@ msgid "" "messages to work)." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:218 +#: ../../source/tutorial-quickstart-mlx.rst:206 msgid "The way MLX stores its parameters is as follows:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:231 +#: ../../source/tutorial-quickstart-mlx.rst:219 msgid "" "Therefore, to get our list of ``np.array`` objects, we need to extract " "each array and convert them into a NumPy array:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:240 +#: ../../source/tutorial-quickstart-mlx.rst:228 msgid "" "For the ``set_params()`` function, we perform the reverse operation. We " "receive a list of NumPy arrays and want to convert them into MLX " @@ -22915,24 +23291,24 @@ msgid "" "them to the `weight` and `bias` keys of each layer dict:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:255 +#: ../../source/tutorial-quickstart-mlx.rst:243 msgid "" "The rest of the functionality is directly inspired by the centralized " "case. 
The ``fit()`` method in the client trains the model using the local" " dataset:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:272 +#: ../../source/tutorial-quickstart-mlx.rst:259 msgid "" "Here, after updating the parameters, we perform the training as in the " "centralized case, and return the new parameters." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:275 +#: ../../source/tutorial-quickstart-mlx.rst:262 msgid "And for the ``evaluate()`` method of the client:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:285 +#: ../../source/tutorial-quickstart-mlx.rst:272 msgid "" "We also begin by updating the parameters with the ones sent by the " "server, and then we compute the loss and accuracy using the functions " @@ -22940,11 +23316,11 @@ msgid "" "the `MLP` model as well as other components such as the optimizer." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:290 +#: ../../source/tutorial-quickstart-mlx.rst:277 msgid "Putting everything together we have:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:344 +#: ../../source/tutorial-quickstart-mlx.rst:331 msgid "" "Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " "defined above by means of a ``client_fn()`` callback. Note that " @@ -22955,7 +23331,7 @@ msgid "" "method." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:378 +#: ../../source/tutorial-quickstart-mlx.rst:363 msgid "" "To construct a ``ServerApp``, we define a ``server_fn()`` callback with " "an identical signature to that of ``client_fn()``, but the return type is" @@ -22966,15 +23342,15 @@ msgid "" "``FedAvg`` strategy." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:402 -#: ../../source/tutorial-quickstart-pytorch.rst:360 -#: ../../source/tutorial-quickstart-tensorflow.rst:279 +#: ../../source/tutorial-quickstart-mlx.rst:386 +#: ../../source/tutorial-quickstart-pytorch.rst:344 +#: ../../source/tutorial-quickstart-tensorflow.rst:266 msgid "" "Congratulations! 
You've successfully built and run your first federated " "learning system." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:407 +#: ../../source/tutorial-quickstart-mlx.rst:390 msgid "" "Check the `source code `_ of the extended version of this tutorial in ``examples" @@ -22987,15 +23363,15 @@ msgid "" "with Pandas to perform Federated Analytics." msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:5 +#: ../../source/tutorial-quickstart-pandas.rst:4 msgid "Quickstart Pandas" msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:10 +#: ../../source/tutorial-quickstart-pandas.rst:9 msgid "Let's build a federated analytics system using Pandas and Flower!" msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:12 +#: ../../source/tutorial-quickstart-pandas.rst:11 msgid "" "Please refer to the `full code example " "`_ " @@ -23008,7 +23384,7 @@ msgid "" "with PyTorch to train a CNN model on MNIST." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:7 +#: ../../source/tutorial-quickstart-pytorch.rst:6 msgid "" "In this federated learning tutorial we will learn how to train a " "Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is " @@ -23016,7 +23392,7 @@ msgid "" ":doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:12 +#: ../../source/tutorial-quickstart-pytorch.rst:11 msgid "" "Let's use `flwr new` to create a complete Flower+PyTorch project. It will" " generate all the files needed to run, by default with the Flower " @@ -23028,14 +23404,14 @@ msgid "" "api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner>`_." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:27 +#: ../../source/tutorial-quickstart-pytorch.rst:26 msgid "" "Then, run the command below. 
You will be prompted to select one of the " "available templates (choose ``PyTorch``), give a name to your project, " "and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:121 +#: ../../source/tutorial-quickstart-pytorch.rst:117 msgid "" "This tutorial uses `Flower Datasets `_ " "to easily download and partition the `CIFAR-10` dataset. In this example " @@ -23049,13 +23425,13 @@ msgid "" " that correspond to their data partition." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:159 +#: ../../source/tutorial-quickstart-pytorch.rst:152 msgid "" "We defined a simple Convolutional Neural Network (CNN), but feel free to " "replace it with a more sophisticated model if you'd like:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:184 +#: ../../source/tutorial-quickstart-pytorch.rst:177 msgid "" "In addition to defining the model architecture, we also include two " "utility functions to perform both training (i.e. ``train()``) and " @@ -23068,7 +23444,7 @@ msgid "" "training or evaluation:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:236 +#: ../../source/tutorial-quickstart-pytorch.rst:226 msgid "" "The main changes we have to make to use `PyTorch` with `Flower` will be " "found in the ``get_weights()`` and ``set_weights()`` functions. In " @@ -23078,7 +23454,7 @@ msgid "" "PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:294 +#: ../../source/tutorial-quickstart-pytorch.rst:282 msgid "" "Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " "defined above by means of a ``client_fn()`` callback. Note that the " @@ -23089,7 +23465,7 @@ msgid "" "additioinal hyperparameters in ``pyproject.toml`` and access them here." 
msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:323 +#: ../../source/tutorial-quickstart-pytorch.rst:309 msgid "" "To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" " identical signature to that of ``client_fn()`` but the return type is " @@ -23103,31 +23479,31 @@ msgid "" "``pyproject.toml``." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:365 +#: ../../source/tutorial-quickstart-pytorch.rst:348 msgid "" "Check the `source code `_ of the extended version of this tutorial in " "``examples/quickstart-pytorch`` in the Flower GitHub repository." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:372 -#: ../../source/tutorial-quickstart-tensorflow.rst:295 +#: ../../source/tutorial-quickstart-pytorch.rst:354 +#: ../../source/tutorial-quickstart-tensorflow.rst:278 #, fuzzy msgid "Video tutorial" msgstr "튜토리얼" -#: ../../source/tutorial-quickstart-pytorch.rst:376 +#: ../../source/tutorial-quickstart-pytorch.rst:358 msgid "" "The video shown below shows how to setup a PyTorch + Flower project using" " our previously recommended APIs. A new video tutorial will be released " "that shows the new APIs (as the content above does)" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:4 msgid "Quickstart PyTorch Lightning" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:7 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:6 msgid "" "In this federated learning tutorial we will learn how to train an " "AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " @@ -23135,13 +23511,13 @@ msgid "" ":doc:`virtualenv `." 
msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:20 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:19 msgid "" "This will create a new directory called `quickstart-pytorch-lightning` " "containing the following files:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:43 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:42 msgid "" "By default, Flower Simulation Engine will be started and it will create a" " federation of 4 nodes using `FedAvg `_ of this tutorial in ``examples" @@ -23171,14 +23547,14 @@ msgid "" "with scikit-learn to train a linear regression model." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:5 +#: ../../source/tutorial-quickstart-scikitlearn.rst:4 msgid "Quickstart scikit-learn" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:10 +#: ../../source/tutorial-quickstart-scikitlearn.rst:9 msgid "" -"In this tutorial, we will learn how to train a :code:`Logistic " -"Regression` model on MNIST using Flower and scikit-learn." +"In this tutorial, we will learn how to train a ``Logistic Regression`` " +"model on MNIST using Flower and scikit-learn." msgstr "" #: ../../source/tutorial-quickstart-scikitlearn.rst:12 @@ -23187,13 +23563,13 @@ msgid "" "within this :doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +#: ../../source/tutorial-quickstart-scikitlearn.rst:15 msgid "" "Our example consists of one *server* and two *clients* all having the " "same model." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:16 +#: ../../source/tutorial-quickstart-scikitlearn.rst:17 msgid "" "*Clients* are responsible for generating individual model parameter " "updates for the model based on their local datasets. These updates are " @@ -23203,189 +23579,190 @@ msgid "" "called a *round*." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +#: ../../source/tutorial-quickstart-scikitlearn.rst:23 msgid "" "Now that we have a rough idea of what is going on, let's get started. We " "first need to install Flower. You can do this by running:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:26 +#: ../../source/tutorial-quickstart-scikitlearn.rst:30 msgid "Since we want to use scikit-learn, let's go ahead and install it:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:32 +#: ../../source/tutorial-quickstart-scikitlearn.rst:36 msgid "Or simply install all dependencies using Poetry:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:42 +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 msgid "" "Now that we have all our dependencies installed, let's run a simple " "distributed training with two clients and one server. However, before " "setting up the client and server, we will define all functionalities that" -" we need for our federated learning setup within :code:`utils.py`. The " -":code:`utils.py` contains different functions defining all the machine " +" we need for our federated learning setup within ``utils.py``. The " +"``utils.py`` contains different functions defining all the machine " "learning basics:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:45 -msgid ":code:`get_model_parameters()`" -msgstr "" +#: ../../source/tutorial-quickstart-scikitlearn.rst:51 +#, fuzzy +msgid "``get_model_parameters()``" +msgstr "모델 매개변수." 
-#: ../../source/tutorial-quickstart-scikitlearn.rst:46 -msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +msgid "Returns the parameters of a ``sklearn`` LogisticRegression model" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:47 -msgid ":code:`set_model_params()`" +#: ../../source/tutorial-quickstart-scikitlearn.rst:53 +msgid "``set_model_params()``" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:48 -msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" +#: ../../source/tutorial-quickstart-scikitlearn.rst:54 +msgid "Sets the parameters of a ``sklearn`` LogisticRegression model" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid ":code:`set_initial_params()`" +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +msgid "``set_initial_params()``" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 msgid "Initializes the model parameters that the Flower server will ask for" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +#: ../../source/tutorial-quickstart-scikitlearn.rst:58 msgid "" -"Please check out :code:`utils.py` `here " +"Please check out ``utils.py`` `here " "`_ for more details. The pre-defined functions are used in" -" the :code:`client.py` and imported. The :code:`client.py` also requires " -"to import several packages such as Flower and scikit-learn:" +" the ``client.py`` and imported. The ``client.py`` also requires to " +"import several packages such as Flower and scikit-learn:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:67 +#: ../../source/tutorial-quickstart-scikitlearn.rst:75 msgid "" "Prior to local training, we need to load the MNIST dataset, a popular " "image classification dataset of handwritten digits for machine learning, " "and partition the dataset for FL. 
This can be conveniently achieved using" " `Flower Datasets `_. The " -":code:`FederatedDataset.load_partition()` method loads the partitioned " -"training set for each partition ID defined in the :code:`--partition-id` " +"``FederatedDataset.load_partition()`` method loads the partitioned " +"training set for each partition ID defined in the ``--partition-id`` " "argument." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:95 +#: ../../source/tutorial-quickstart-scikitlearn.rst:106 msgid "" "Next, the logistic regression model is defined and initialized with " -":code:`utils.set_initial_params()`." +"``utils.set_initial_params()``." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:107 +#: ../../source/tutorial-quickstart-scikitlearn.rst:119 msgid "" "The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to fit the logistic regression we defined earlier)." +"``Client``. When the server selects a particular client for training, it " +"sends training instructions over the network. The client receives those " +"instructions and calls one of the ``Client`` methods to run your code " +"(i.e., to fit the logistic regression we defined earlier)." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:113 +#: ../../source/tutorial-quickstart-scikitlearn.rst:124 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses scikit-learn. 
Implementing :code:`NumPyClient` usually " -"means defining the following methods (:code:`set_parameters` is optional " -"though):" +"Flower provides a convenience class called ``NumPyClient`` which makes it" +" easier to implement the ``Client`` interface when your workload uses " +"scikit-learn. Implementing ``NumPyClient`` usually means defining the " +"following methods (``set_parameters`` is optional though):" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:119 +#: ../../source/tutorial-quickstart-scikitlearn.rst:130 msgid "return the model weight as a list of NumPy ndarrays" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 -msgid ":code:`set_parameters` (optional)" -msgstr "" +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 +#, fuzzy +msgid "``set_parameters`` (optional)" +msgstr ":code:`set_parameters (선택사항)`" -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 msgid "" "update the local model weights with the parameters received from the " "server" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:122 -msgid "is directly imported with :code:`utils.set_model_params()`" +#: ../../source/tutorial-quickstart-scikitlearn.rst:133 +msgid "is directly imported with ``utils.set_model_params()``" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:124 +#: ../../source/tutorial-quickstart-scikitlearn.rst:135 msgid "set the local model weights" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 msgid "train the local model" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:126 +#: ../../source/tutorial-quickstart-scikitlearn.rst:137 #, fuzzy msgid "return the updated local model weights" msgstr "현재 로컬 모델 파라미터를 반환합니다." 
-#: ../../source/tutorial-quickstart-scikitlearn.rst:128 +#: ../../source/tutorial-quickstart-scikitlearn.rst:139 msgid "test the local model" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:130 +#: ../../source/tutorial-quickstart-scikitlearn.rst:141 msgid "The methods can be implemented in the following way:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:153 +#: ../../source/tutorial-quickstart-scikitlearn.rst:163 msgid "" -"We can now create an instance of our class :code:`MnistClient` and add " -"one line to actually run this client:" +"We can now create an instance of our class ``MnistClient`` and add one " +"line to actually run this client:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:160 +#: ../../source/tutorial-quickstart-scikitlearn.rst:170 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" -" the client which server to connect to. In our case we can run the server" -" and the client on the same machine, therefore we use " -":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we pass to the client." +"That's it for the client. We only have to implement ``Client`` or " +"``NumPyClient`` and call ``fl.client.start_client()``. If you implement a" +" client of type ``NumPyClient`` you'll need to first call its " +"``to_client()`` method. The string ``\"0.0.0.0:8080\"`` tells the client " +"which server to connect to. In our case we can run the server and the " +"client on the same machine, therefore we use ``\"0.0.0.0:8080\"``. 
If we " +"run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" pass to the client." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:169 +#: ../../source/tutorial-quickstart-scikitlearn.rst:181 msgid "" "The following Flower server is a little bit more advanced and returns an " "evaluation function for the server-side evaluation. First, we import " "again all required libraries such as Flower and scikit-learn." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:172 -msgid ":code:`server.py`, import Flower and start the server:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +msgid "``server.py``, import Flower and start the server:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +#: ../../source/tutorial-quickstart-scikitlearn.rst:198 msgid "" -"The number of federated learning rounds is set in :code:`fit_round()` and" -" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " -"function is called after each federated learning round and gives you " -"information about loss and accuracy. Note that we also make use of Flower" -" Datasets here to load the test split of the MNIST dataset for server-" -"side evaluation." +"The number of federated learning rounds is set in ``fit_round()`` and the" +" evaluation is defined in ``get_evaluate_fn()``. The evaluation function " +"is called after each federated learning round and gives you information " +"about loss and accuracy. Note that we also make use of Flower Datasets " +"here to load the test split of the MNIST dataset for server-side " +"evaluation." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:213 +#: ../../source/tutorial-quickstart-scikitlearn.rst:228 msgid "" -"The :code:`main` contains the server-side parameter initialization " -":code:`utils.set_initial_params()` as well as the aggregation strategy " -":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " +"The ``main`` contains the server-side parameter initialization " +"``utils.set_initial_params()`` as well as the aggregation strategy " +"``fl.server.strategy:FedAvg()``. The strategy is the default one, " "federated averaging (or FedAvg), with two clients and evaluation after " "each federated learning round. The server can be started with the command" -" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." +" ``fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))``." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:232 +#: ../../source/tutorial-quickstart-scikitlearn.rst:256 msgid "" "With both client and server ready, we can now run everything and see " "federated learning in action. Federated learning systems usually have a " @@ -23393,33 +23770,33 @@ msgid "" "first:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:239 -#: ../../source/tutorial-quickstart-xgboost.rst:575 +#: ../../source/tutorial-quickstart-scikitlearn.rst:264 +#: ../../source/tutorial-quickstart-xgboost.rst:598 msgid "" "Once the server is running we can start the clients in different " "terminals. 
Open a new terminal and start the first client:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:246 -#: ../../source/tutorial-quickstart-xgboost.rst:582 +#: ../../source/tutorial-quickstart-scikitlearn.rst:271 +#: ../../source/tutorial-quickstart-xgboost.rst:605 msgid "Open another terminal and start the second client:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:252 -#: ../../source/tutorial-quickstart-xgboost.rst:588 +#: ../../source/tutorial-quickstart-scikitlearn.rst:277 +#: ../../source/tutorial-quickstart-xgboost.rst:611 msgid "" "Each client will have its own dataset. You should now see how the " "training does in the very first terminal (the one that started the " "server):" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:286 +#: ../../source/tutorial-quickstart-scikitlearn.rst:311 msgid "" "Congratulations! You've successfully built and run your first federated " "learning system. The full `source code " "`_ for this example can be found in :code:`examples/sklearn-logreg-" -"mnist`." +"mnist>`_ for this example can be found in ``examples/sklearn-logreg-" +"mnist``." msgstr "" #: ../../source/tutorial-quickstart-tensorflow.rst:-1 @@ -23428,11 +23805,11 @@ msgid "" "with TensorFlow to train a CNN model on CIFAR-10." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:5 +#: ../../source/tutorial-quickstart-tensorflow.rst:4 msgid "Quickstart TensorFlow" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:7 +#: ../../source/tutorial-quickstart-tensorflow.rst:6 msgid "" "In this tutorial we will learn how to train a Convolutional Neural " "Network on CIFAR-10 using the Flower framework and TensorFlow. First of " @@ -23440,7 +23817,7 @@ msgid "" " within a :doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:13 +#: ../../source/tutorial-quickstart-tensorflow.rst:11 msgid "" "Let's use `flwr new` to create a complete Flower+TensorFlow project. 
It " "will generate all the files needed to run, by default with the Flower " @@ -23452,14 +23829,14 @@ msgid "" "api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner>`_." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:28 +#: ../../source/tutorial-quickstart-tensorflow.rst:26 msgid "" "Then, run the command below. You will be prompted to select one of the " "available templates (choose ``TensorFlow``), give a name to your project," " and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:118 +#: ../../source/tutorial-quickstart-tensorflow.rst:114 msgid "" "This tutorial uses `Flower Datasets `_ " "to easily download and partition the `CIFAR-10` dataset. In this example " @@ -23473,14 +23850,14 @@ msgid "" " correspond to their data partition." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:147 +#: ../../source/tutorial-quickstart-tensorflow.rst:141 msgid "" "Next, we need a model. We defined a simple Convolutional Neural Network " "(CNN), but feel free to replace it with a more sophisticated model if " "you'd like:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:178 +#: ../../source/tutorial-quickstart-tensorflow.rst:170 msgid "" "With `TensorFlow`, we can use the built-in ``get_weights()`` and " "``set_weights()`` functions, which simplifies the implementation with " @@ -23491,7 +23868,7 @@ msgid "" "set that the client might have:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:212 +#: ../../source/tutorial-quickstart-tensorflow.rst:203 msgid "" "Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " "defined above by means of a ``client_fn()`` callback. Note that the " @@ -23503,7 +23880,7 @@ msgid "" "``pyproject.toml`` and access them here." 
msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:247 +#: ../../source/tutorial-quickstart-tensorflow.rst:234 msgid "" "To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" " identical signature to that of ``client_fn()`` but the return type is " @@ -23515,13 +23892,13 @@ msgid "" "the global model to federate." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:284 +#: ../../source/tutorial-quickstart-tensorflow.rst:270 msgid "" "Check the source code of the extended version of this tutorial in " "|quickstart_tf_link|_ in the Flower GitHub repository." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:299 +#: ../../source/tutorial-quickstart-tensorflow.rst:282 msgid "" "The video shown below shows how to setup a TensorFlow + Flower project " "using our previously recommended APIs. A new video tutorial will be " @@ -23534,15 +23911,15 @@ msgid "" "with XGBoost to train classification models on trees." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:5 +#: ../../source/tutorial-quickstart-xgboost.rst:4 msgid "Quickstart XGBoost" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:14 +#: ../../source/tutorial-quickstart-xgboost.rst:13 msgid "Federated XGBoost" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:16 +#: ../../source/tutorial-quickstart-xgboost.rst:15 msgid "" "EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " "implementation of gradient-boosted decision tree (**GBDT**), that " @@ -23552,18 +23929,18 @@ msgid "" "concurrently, unlike the sequential approach taken by GBDT." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:20 +#: ../../source/tutorial-quickstart-xgboost.rst:21 msgid "" "Often, for tabular data on medium-sized datasets with fewer than 10k " "training examples, XGBoost surpasses the results of deep learning " "techniques." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:23 +#: ../../source/tutorial-quickstart-xgboost.rst:25 msgid "Why federated XGBoost?" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:25 +#: ../../source/tutorial-quickstart-xgboost.rst:27 msgid "" "Indeed, as the demand for data privacy and decentralized learning grows, " "there's an increasing requirement to implement federated XGBoost systems " @@ -23571,7 +23948,7 @@ msgid "" "detection." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:27 +#: ../../source/tutorial-quickstart-xgboost.rst:31 msgid "" "Federated learning ensures that raw data remains on the local device, " "making it an attractive approach for sensitive domains where data " @@ -23580,10 +23957,10 @@ msgid "" "solution for these specific challenges." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:30 +#: ../../source/tutorial-quickstart-xgboost.rst:36 msgid "" "In this tutorial we will learn how to train a federated XGBoost model on " -"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " +"HIGGS dataset using Flower and ``xgboost`` package. We use a simple " "example (`full code xgboost-quickstart " "`_)" " with two *clients* and one *server* to demonstrate how federated XGBoost" @@ -23592,30 +23969,30 @@ msgid "" "comprehensive>`_) to run various experiments." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:37 +#: ../../source/tutorial-quickstart-xgboost.rst:46 msgid "Environment Setup" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:39 +#: ../../source/tutorial-quickstart-xgboost.rst:48 msgid "" "First of all, it is recommended to create a virtual environment and run " "everything within a :doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:41 +#: ../../source/tutorial-quickstart-xgboost.rst:51 msgid "" "We first need to install Flower and Flower Datasets. 
You can do this by " "running :" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:47 +#: ../../source/tutorial-quickstart-xgboost.rst:57 msgid "" -"Since we want to use :code:`xgboost` package to build up XGBoost trees, " -"let's go ahead and install :code:`xgboost`:" +"Since we want to use ``xgboost`` package to build up XGBoost trees, let's" +" go ahead and install ``xgboost``:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:57 +#: ../../source/tutorial-quickstart-xgboost.rst:67 msgid "" "*Clients* are responsible for generating individual weight-updates for " "the model based on their local datasets. Now that we have all our " @@ -23623,219 +24000,215 @@ msgid "" "clients and one server." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:60 +#: ../../source/tutorial-quickstart-xgboost.rst:71 msgid "" -"In a file called :code:`client.py`, import xgboost, Flower, Flower " -"Datasets and other related functions:" +"In a file called ``client.py``, import xgboost, Flower, Flower Datasets " +"and other related functions:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:87 +#: ../../source/tutorial-quickstart-xgboost.rst:99 msgid "Dataset partition and hyper-parameter selection" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:89 +#: ../../source/tutorial-quickstart-xgboost.rst:101 msgid "" "Prior to local training, we require loading the HIGGS dataset from Flower" " Datasets and conduct data partitioning for FL:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:102 +#: ../../source/tutorial-quickstart-xgboost.rst:115 msgid "" "In this example, we split the dataset into 30 partitions with uniform " -"distribution (:code:`IidPartitioner(num_partitions=30)`). Then, we load " -"the partition for the given client based on :code:`partition_id`:" +"distribution (``IidPartitioner(num_partitions=30)``). 
Then, we load the " +"partition for the given client based on ``partition_id``:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:121 +#: ../../source/tutorial-quickstart-xgboost.rst:135 msgid "" "After that, we do train/test splitting on the given partition (client's " -"local data), and transform data format for :code:`xgboost` package." +"local data), and transform data format for ``xgboost`` package." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:134 +#: ../../source/tutorial-quickstart-xgboost.rst:149 msgid "" -"The functions of :code:`train_test_split` and " -":code:`transform_dataset_to_dmatrix` are defined as below:" +"The functions of ``train_test_split`` and " +"``transform_dataset_to_dmatrix`` are defined as below:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:158 +#: ../../source/tutorial-quickstart-xgboost.rst:174 msgid "Finally, we define the hyper-parameters used for XGBoost training." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:174 +#: ../../source/tutorial-quickstart-xgboost.rst:190 msgid "" -"The :code:`num_local_round` represents the number of iterations for local" -" tree boost. We use CPU for the training in default. One can shift it to " -"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " -"evaluation metric." +"The ``num_local_round`` represents the number of iterations for local " +"tree boost. We use CPU for the training in default. One can shift it to " +"GPU by setting ``tree_method`` to ``gpu_hist``. We use AUC as evaluation " +"metric." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:181 +#: ../../source/tutorial-quickstart-xgboost.rst:195 msgid "Flower client definition for XGBoost" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:183 +#: ../../source/tutorial-quickstart-xgboost.rst:197 msgid "" "After loading the dataset we define the Flower client. 
We follow the " -"general rule to define :code:`XgbClient` class inherited from " -":code:`fl.client.Client`." +"general rule to define ``XgbClient`` class inherited from " +"``fl.client.Client``." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:205 +#: ../../source/tutorial-quickstart-xgboost.rst:219 msgid "" -"All required parameters defined above are passed to :code:`XgbClient`'s " +"All required parameters defined above are passed to ``XgbClient``'s " "constructor." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:207 +#: ../../source/tutorial-quickstart-xgboost.rst:221 msgid "" -"Then, we override :code:`get_parameters`, :code:`fit` and " -":code:`evaluate` methods insides :code:`XgbClient` class as follows." +"Then, we override ``get_parameters``, ``fit`` and ``evaluate`` methods " +"insides ``XgbClient`` class as follows." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:221 +#: ../../source/tutorial-quickstart-xgboost.rst:236 msgid "" "Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. In this case, we do not use " -":code:`get_parameters` and :code:`set_parameters` to initialise model " -"parameters for XGBoost. As a result, let's return an empty tensor in " -":code:`get_parameters` when it is called by the server at the first " -"round." +"specified random weights. In this case, we do not use ``get_parameters`` " +"and ``set_parameters`` to initialise model parameters for XGBoost. As a " +"result, let's return an empty tensor in ``get_parameters`` when it is " +"called by the server at the first round." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:262 +#: ../../source/tutorial-quickstart-xgboost.rst:278 msgid "" -"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " -"up the first set of trees. 
From the second round, we load the global " -"model sent from server to new build Booster object, and then update model" -" weights on local training data with function :code:`local_boost` as " -"follows:" +"In ``fit``, at the first round, we call ``xgb.train()`` to build up the " +"first set of trees. From the second round, we load the global model sent " +"from server to new build Booster object, and then update model weights on" +" local training data with function ``local_boost`` as follows:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:281 +#: ../../source/tutorial-quickstart-xgboost.rst:298 msgid "" -"Given :code:`num_local_round`, we update trees by calling " -":code:`bst_input.update` method. After training, the last " -":code:`N=num_local_round` trees will be extracted to send to the server." +"Given ``num_local_round``, we update trees by calling " +"``bst_input.update`` method. After training, the last " +"``N=num_local_round`` trees will be extracted to send to the server." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:313 +#: ../../source/tutorial-quickstart-xgboost.rst:330 msgid "" -"In :code:`evaluate`, after loading the global model, we call " -":code:`bst.eval_set` function to conduct evaluation on valid set. The AUC" -" value will be returned." +"In ``evaluate``, after loading the global model, we call ``bst.eval_set``" +" function to conduct evaluation on valid set. The AUC value will be " +"returned." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:316 +#: ../../source/tutorial-quickstart-xgboost.rst:333 msgid "" -"Now, we can create an instance of our class :code:`XgbClient` and add one" -" line to actually run this client:" +"Now, we can create an instance of our class ``XgbClient`` and add one " +"line to actually run this client:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:332 +#: ../../source/tutorial-quickstart-xgboost.rst:350 msgid "" -"That's it for the client. 
We only have to implement :code:`Client` and " -"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " -"tells the client which server to connect to. In our case we can run the " -"server and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"That's it for the client. We only have to implement ``Client`` and call " +"``fl.client.start_client()``. The string ``\"[::]:8080\"`` tells the " +"client which server to connect to. In our case we can run the server and " +"the client on the same machine, therefore we use ``\"[::]:8080\"``. If we" +" run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" point the client at." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:343 +#: ../../source/tutorial-quickstart-xgboost.rst:360 msgid "" "These updates are then sent to the *server* which will aggregate them to " "produce a better model. Finally, the *server* sends this improved version" " of the model back to each *client* to finish a complete FL round." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:346 +#: ../../source/tutorial-quickstart-xgboost.rst:364 msgid "" -"In a file named :code:`server.py`, import Flower and FedXgbBagging from " -":code:`flwr.server.strategy`." +"In a file named ``server.py``, import Flower and FedXgbBagging from " +"``flwr.server.strategy``." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:348 +#: ../../source/tutorial-quickstart-xgboost.rst:367 msgid "We first define a strategy for XGBoost bagging aggregation." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:380 +#: ../../source/tutorial-quickstart-xgboost.rst:401 msgid "" -"We use two clients for this example. 
An " -":code:`evaluate_metrics_aggregation` function is defined to collect and " -"wighted average the AUC values from clients. The :code:`config_func` " -"function is to return the current FL round number to client's " -":code:`fit()` and :code:`evaluate()` methods." +"We use two clients for this example. An ``evaluate_metrics_aggregation`` " +"function is defined to collect and wighted average the AUC values from " +"clients. The ``config_func`` function is to return the current FL round " +"number to client's ``fit()`` and ``evaluate()`` methods." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:384 +#: ../../source/tutorial-quickstart-xgboost.rst:406 msgid "Then, we start the server:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:396 +#: ../../source/tutorial-quickstart-xgboost.rst:418 msgid "Tree-based bagging aggregation" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:398 +#: ../../source/tutorial-quickstart-xgboost.rst:420 msgid "" "You must be curious about how bagging aggregation works. Let's look into " "the details." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:400 +#: ../../source/tutorial-quickstart-xgboost.rst:422 msgid "" -"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " -":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." -" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " -"and :code:`evaluate` methods as follows:" +"In file ``flwr.server.strategy.fedxgb_bagging.py``, we define " +"``FedXgbBagging`` inherited from ``flwr.server.strategy.FedAvg``. 
Then, " +"we override the ``aggregate_fit``, ``aggregate_evaluate`` and " +"``evaluate`` methods as follows:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:496 +#: ../../source/tutorial-quickstart-xgboost.rst:519 msgid "" -"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " -"trees by calling :code:`aggregate()` function:" +"In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost " +"trees by calling ``aggregate()`` function:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:555 +#: ../../source/tutorial-quickstart-xgboost.rst:579 msgid "" "In this function, we first fetch the number of trees and the number of " "parallel trees for the current and previous model by calling " -":code:`_get_tree_nums`. Then, the fetched information will be aggregated." -" After that, the trees (containing model weights) are aggregated to " +"``_get_tree_nums``. Then, the fetched information will be aggregated. " +"After that, the trees (containing model weights) are aggregated to " "generate a new tree model." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:560 +#: ../../source/tutorial-quickstart-xgboost.rst:584 msgid "" "After traversal of all clients' models, a new global model is generated, " "followed by the serialisation, and sending back to each client." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:565 +#: ../../source/tutorial-quickstart-xgboost.rst:588 msgid "Launch Federated XGBoost!" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:641 +#: ../../source/tutorial-quickstart-xgboost.rst:664 msgid "" "Congratulations! You've successfully built and run your first federated " -"XGBoost system. The AUC values can be checked in " -":code:`metrics_distributed`. One can see that the average AUC increases " -"over FL rounds." +"XGBoost system. The AUC values can be checked in ``metrics_distributed``." +" One can see that the average AUC increases over FL rounds." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:646 +#: ../../source/tutorial-quickstart-xgboost.rst:668 msgid "" "The full `source code `_ for this example can be found in :code:`examples" -"/xgboost-quickstart`." +"/xgboost-quickstart/>`_ for this example can be found in ``examples" +"/xgboost-quickstart``." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:650 +#: ../../source/tutorial-quickstart-xgboost.rst:673 msgid "Comprehensive Federated XGBoost" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:652 +#: ../../source/tutorial-quickstart-xgboost.rst:675 msgid "" "Now that you have known how federated XGBoost work with Flower, it's time" " to run some more comprehensive experiments by customising the " @@ -23848,11 +24221,11 @@ msgid "" "client cohorts in a resource-aware manner. Let's take a look!" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:659 +#: ../../source/tutorial-quickstart-xgboost.rst:685 msgid "Cyclic training" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:661 +#: ../../source/tutorial-quickstart-xgboost.rst:687 msgid "" "In addition to bagging aggregation, we offer a cyclic training scheme, " "which performs FL in a client-by-client fashion. Instead of aggregating " @@ -23862,178 +24235,176 @@ msgid "" "for next round's boosting." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:665 -msgid "" -"To do this, we first customise a :code:`ClientManager` in " -":code:`server_utils.py`:" +#: ../../source/tutorial-quickstart-xgboost.rst:693 +msgid "To do this, we first customise a ``ClientManager`` in ``server_utils.py``:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:705 +#: ../../source/tutorial-quickstart-xgboost.rst:733 msgid "" -"The customised :code:`ClientManager` samples all available clients in " -"each FL round based on the order of connection to the server. 
Then, we " -"define a new strategy :code:`FedXgbCyclic` in " -":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " +"The customised ``ClientManager`` samples all available clients in each FL" +" round based on the order of connection to the server. Then, we define a " +"new strategy ``FedXgbCyclic`` in " +"``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially " "select only one client in given round and pass the received model to next" " client." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:746 +#: ../../source/tutorial-quickstart-xgboost.rst:775 msgid "" -"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " +"Unlike the original ``FedAvg``, we don't perform aggregation here. " "Instead, we just make a copy of the received client model as global model" -" by overriding :code:`aggregate_fit`." +" by overriding ``aggregate_fit``." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:749 +#: ../../source/tutorial-quickstart-xgboost.rst:778 msgid "" -"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" -" methods ensure the clients to be sequentially selected given FL round:" +"Also, the customised ``configure_fit`` and ``configure_evaluate`` methods" +" ensure the clients to be sequentially selected given FL round:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:813 +#: ../../source/tutorial-quickstart-xgboost.rst:840 msgid "Customised data partitioning" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:815 +#: ../../source/tutorial-quickstart-xgboost.rst:842 msgid "" -"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" -" to instantiate the data partitioner based on the given " -":code:`num_partitions` and :code:`partitioner_type`. Currently, we " -"provide four supported partitioner type to simulate the uniformity/non-" -"uniformity in data quantity (uniform, linear, square, exponential)." 
+"In ``dataset.py``, we have a function ``instantiate_partitioner`` to " +"instantiate the data partitioner based on the given ``num_partitions`` " +"and ``partitioner_type``. Currently, we provide four supported " +"partitioner type to simulate the uniformity/non-uniformity in data " +"quantity (uniform, linear, square, exponential)." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:846 +#: ../../source/tutorial-quickstart-xgboost.rst:873 msgid "Customised centralised/distributed evaluation" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:848 +#: ../../source/tutorial-quickstart-xgboost.rst:875 msgid "" "To facilitate centralised evaluation, we define a function in " -":code:`server_utils.py`:" +"``server_utils.py``:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:880 +#: ../../source/tutorial-quickstart-xgboost.rst:907 msgid "" "This function returns a evaluation function which instantiates a " -":code:`Booster` object and loads the global model weights to it. The " -"evaluation is conducted by calling :code:`eval_set()` method, and the " -"tested AUC value is reported." +"``Booster`` object and loads the global model weights to it. The " +"evaluation is conducted by calling ``eval_set()`` method, and the tested " +"AUC value is reported." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:883 +#: ../../source/tutorial-quickstart-xgboost.rst:911 msgid "" "As for distributed evaluation on the clients, it's same as the quick-" -"start example by overriding the :code:`evaluate()` method insides the " -":code:`XgbClient` class in :code:`client_utils.py`." +"start example by overriding the ``evaluate()`` method insides the " +"``XgbClient`` class in ``client_utils.py``." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:887 +#: ../../source/tutorial-quickstart-xgboost.rst:916 msgid "Flower simulation" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:888 +#: ../../source/tutorial-quickstart-xgboost.rst:918 msgid "" -"We also provide an example code (:code:`sim.py`) to use the simulation " +"We also provide an example code (``sim.py``) to use the simulation " "capabilities of Flower to simulate federated XGBoost training on either a" " single machine or a cluster of machines." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:922 +#: ../../source/tutorial-quickstart-xgboost.rst:954 msgid "" -"After importing all required packages, we define a :code:`main()` " -"function to perform the simulation process:" +"After importing all required packages, we define a ``main()`` function to" +" perform the simulation process:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:977 +#: ../../source/tutorial-quickstart-xgboost.rst:1010 msgid "" "We first load the dataset and perform data partitioning, and the pre-" -"processed data is stored in a :code:`list`. After the simulation begins, " -"the clients won't need to pre-process their partitions again." +"processed data is stored in a ``list``. After the simulation begins, the " +"clients won't need to pre-process their partitions again." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:980 +#: ../../source/tutorial-quickstart-xgboost.rst:1014 msgid "Then, we define the strategies and other hyper-parameters:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1031 +#: ../../source/tutorial-quickstart-xgboost.rst:1065 msgid "" "After that, we start the simulation by calling " -":code:`fl.simulation.start_simulation`:" +"``fl.simulation.start_simulation``:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1051 +#: ../../source/tutorial-quickstart-xgboost.rst:1085 msgid "" -"One of key parameters for :code:`start_simulation` is :code:`client_fn` " -"which returns a function to construct a client. We define it as follows:" +"One of key parameters for ``start_simulation`` is ``client_fn`` which " +"returns a function to construct a client. We define it as follows:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1094 +#: ../../source/tutorial-quickstart-xgboost.rst:1126 msgid "Arguments parser" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1096 +#: ../../source/tutorial-quickstart-xgboost.rst:1128 msgid "" -"In :code:`utils.py`, we define the arguments parsers for clients, server " -"and simulation, allowing users to specify different experimental " -"settings. Let's first see the sever side:" +"In ``utils.py``, we define the arguments parsers for clients, server and " +"simulation, allowing users to specify different experimental settings. " +"Let's first see the sever side:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1142 +#: ../../source/tutorial-quickstart-xgboost.rst:1175 msgid "" "This allows user to specify training strategies / the number of total " "clients / FL rounds / participating clients / clients for evaluation, and" -" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" -" will do centralised evaluation and all functionalities for client " +" evaluation fashion. 
Note that with ``--centralised-eval``, the sever " +"will do centralised evaluation and all functionalities for client " "evaluation will be disabled." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1146 +#: ../../source/tutorial-quickstart-xgboost.rst:1180 msgid "Then, the argument parser on client side:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1200 +#: ../../source/tutorial-quickstart-xgboost.rst:1234 msgid "" "This defines various options for client data partitioning. Besides, " "clients also have an option to conduct evaluation on centralised test set" -" by setting :code:`--centralised-eval`, as well as an option to perform " -"scaled learning rate based on the number of clients by setting :code" -":`--scaled-lr`." +" by setting ``--centralised-eval``, as well as an option to perform " +"scaled learning rate based on the number of clients by setting " +"``--scaled-lr``." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1204 +#: ../../source/tutorial-quickstart-xgboost.rst:1239 msgid "We also have an argument parser for simulation:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1282 +#: ../../source/tutorial-quickstart-xgboost.rst:1317 msgid "This integrates all arguments for both client and server sides." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1285 +#: ../../source/tutorial-quickstart-xgboost.rst:1320 msgid "Example commands" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1287 +#: ../../source/tutorial-quickstart-xgboost.rst:1322 msgid "" "To run a centralised evaluated experiment with bagging strategy on 5 " "clients with exponential distribution for 50 rounds, we first start the " "server as below:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1294 +#: ../../source/tutorial-quickstart-xgboost.rst:1329 msgid "Then, on each client terminal, we start the clients:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1300 +#: ../../source/tutorial-quickstart-xgboost.rst:1335 msgid "To run the same experiment with Flower simulation:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1306 +#: ../../source/tutorial-quickstart-xgboost.rst:1341 msgid "" "The full `code `_ for this comprehensive example can be found in" -" :code:`examples/xgboost-comprehensive`." +" ``examples/xgboost-comprehensive``." msgstr "" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 @@ -25394,7 +25765,7 @@ msgstr "" " 수도 있습니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" +msgid "|3a7aceef05f0421794726ac54aaf12fd|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 @@ -25411,7 +25782,7 @@ msgstr "" " 바둑과 같은 게임을 하는 것일 수 있습니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|33cacb7d985c4906b348515c1a5cd993|" +msgid "|d741075f8e624331b42c0746f7d258a0|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 @@ -25434,7 +25805,7 @@ msgstr "" "부르리는 것을 듣는 스마트 스피커에서 비롯됩니다." 
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|cc080a555947492fa66131dc3a967603|" +msgid "|8fc92d668bcb42b8bda55143847f2329|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 @@ -25452,7 +25823,7 @@ msgstr "" "있습니다. 하지만 여러 조직이 모두 같은 작업을 위해 데이터를 생성하는 것일 수도 있습니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|085c3e0fb8664c6aa06246636524b20b|" +msgid "|1c705d833a024f22adcaeb8ae3d13b0b|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 @@ -25470,7 +25841,7 @@ msgstr "" "서버는 데이터 센터 어딘가에 있을 수도 있고 클라우드 어딘가에 있을 수도 있습니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|bfe69c74e48c45d49b50251c38c2a019|" +msgid "|77a037b546a84262b608e04bc82a2c96|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 @@ -25487,7 +25858,7 @@ msgstr "" " 우리가 기본적으로 사용해 온 머신러닝 방법입니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|ebbecd651f0348d99c6511ea859bf4ca|" +msgid "|f568e24c9fb0435690ac628210a4be96|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 @@ -25509,7 +25880,7 @@ msgstr "" "트래픽을 분석하는 것이 있습니다. 이러한 사례에서 모든 데이터는 자연스럽게 중앙 서버에 존재합니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|163117eb654a4273babba413cf8065f5|" +msgid "|a7bf029981514e2593aa3a2b48c9d76a|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 @@ -25526,7 +25897,7 @@ msgstr "" "좋은 모델을 훈련하기에 충분하지 않을 수 있습니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|452ac3ba453b4cd1be27be1ba7560d64|" +msgid "|3f645ad807f84be8b1f8f3267173939c|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 @@ -25693,7 +26064,7 @@ msgstr "" "체크포인트에서 모델 매개변수를 초기화합니다." 
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|f403fcd69e4e44409627e748b404c086|" +msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 @@ -25720,7 +26091,7 @@ msgstr "" "개의 연결 노드만 사용합니다. 그 이유는 점점 더 많은 클라이언트 노드를 선택하면 학습의 효율성이 감소하기 때문입니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|4b00fe63870145968f8443619a792a42|" +msgid "|edcf9a04d96e42608fd01a333375febe|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 @@ -25747,7 +26118,7 @@ msgstr "" "데이터에서 한 단계 정도로 짧거나 몇 단계(mini-batches)에 불과할 수 있습니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|368378731066486fa4397e89bc6b870c|" +msgid "|3dae22fe797043968e2b7aa7073c78bd|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 @@ -25773,7 +26144,7 @@ msgstr "" "보냅니다. 보내는 모델 업데이트는 전체 모델 파라미터거나 로컬 교육 중에 누적된 그레디언트(gradient)일 수 있습니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|a66aa83d85bf4ffba7ed660b718066da|" +msgid "|ba178f75267d4ad8aa7363f20709195f|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 @@ -25822,7 +26193,7 @@ msgstr "" "많은 영향을 미칩니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|82324b9af72a4582a81839d55caab767|" +msgid "|c380c750bfd2444abce039a1c6fa8e60|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 @@ -25941,7 +26312,7 @@ msgstr "" "사용자는 모든 워크로드, 머신러닝 프레임워크 및 모든 프로그래밍 언어를 통합할 수 있습니다." 
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" +msgid "|e7cec00a114b48359935c6510595132e|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 @@ -28121,3 +28492,767 @@ msgstr "" #~ msgid "|ed9aae51da70428eab7eef32f21e819e|" #~ msgstr "" +#~ msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" +#~ msgstr "" + +#~ msgid "|33cacb7d985c4906b348515c1a5cd993|" +#~ msgstr "" + +#~ msgid "|cc080a555947492fa66131dc3a967603|" +#~ msgstr "" + +#~ msgid "|085c3e0fb8664c6aa06246636524b20b|" +#~ msgstr "" + +#~ msgid "|bfe69c74e48c45d49b50251c38c2a019|" +#~ msgstr "" + +#~ msgid "|ebbecd651f0348d99c6511ea859bf4ca|" +#~ msgstr "" + +#~ msgid "|163117eb654a4273babba413cf8065f5|" +#~ msgstr "" + +#~ msgid "|452ac3ba453b4cd1be27be1ba7560d64|" +#~ msgstr "" + +#~ msgid "|f403fcd69e4e44409627e748b404c086|" +#~ msgstr "" + +#~ msgid "|4b00fe63870145968f8443619a792a42|" +#~ msgstr "" + +#~ msgid "|368378731066486fa4397e89bc6b870c|" +#~ msgstr "" + +#~ msgid "|a66aa83d85bf4ffba7ed660b718066da|" +#~ msgstr "" + +#~ msgid "|82324b9af72a4582a81839d55caab767|" +#~ msgstr "" + +#~ msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" +#~ msgstr "" + +#~ msgid "" +#~ "Some quickstart examples may have " +#~ "limitations or requirements that prevent " +#~ "them from running on every environment." +#~ " For more information, please see " +#~ "`Limitations`_." +#~ msgstr "" + +#~ msgid "" +#~ "Change the application code. For " +#~ "example, change the ``seed`` in " +#~ "``quickstart_docker/task.py`` to ``43`` and " +#~ "save it:" +#~ msgstr "" + +#~ msgid ":code:`fit`" +#~ msgstr ":code:`fit`" + +#~ msgid "" +#~ "Note that since version :code:`1.11.0`, " +#~ ":code:`flower-server-app` no longer " +#~ "supports passing a reference to a " +#~ "`ServerApp` attribute. Instead, you need " +#~ "to pass the path to Flower app " +#~ "via the argument :code:`--app`. 
This is" +#~ " the path to a directory containing" +#~ " a `pyproject.toml`. You can create a" +#~ " valid Flower app by executing " +#~ ":code:`flwr new` and following the " +#~ "prompt." +#~ msgstr "" + +#~ msgid "" +#~ "The following examples are available as" +#~ " standalone projects. Quickstart TensorFlow/Keras" +#~ " ---------------------------" +#~ msgstr "" + +#~ msgid "" +#~ "Let's create a new application project" +#~ " in Xcode and add :code:`flwr` as " +#~ "a dependency in your project. For " +#~ "our application, we will store the " +#~ "logic of our app in " +#~ ":code:`FLiOSModel.swift` and the UI elements" +#~ " in :code:`ContentView.swift`. We will " +#~ "focus more on :code:`FLiOSModel.swift` in " +#~ "this quickstart. Please refer to the " +#~ "`full code example " +#~ "`_ to " +#~ "learn more about the app." +#~ msgstr "" + +#~ msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +#~ msgstr "" + +#~ msgid "" +#~ "Then add the mlmodel to the " +#~ "project simply by drag-and-drop, " +#~ "the mlmodel will be bundled inside " +#~ "the application during deployment to " +#~ "your iOS device. We need to pass" +#~ " the url to access mlmodel and " +#~ "run CoreML machine learning processes, " +#~ "it can be retrieved by calling the" +#~ " function :code:`Bundle.main.url`. For the " +#~ "MNIST dataset, we need to preprocess " +#~ "it into :code:`MLBatchProvider` object. The" +#~ " preprocessing is done inside " +#~ ":code:`DataLoader.swift`." +#~ msgstr "" + +#~ msgid "" +#~ "Since CoreML does not allow the " +#~ "model parameters to be seen before " +#~ "training, and accessing the model " +#~ "parameters during or after the training" +#~ " can only be done by specifying " +#~ "the layer name, we need to know" +#~ " this information beforehand, through " +#~ "looking at the model specification, " +#~ "which are written as proto files. " +#~ "The implementation can be seen in " +#~ ":code:`MLModelInspect`." 
+#~ msgstr "" + +#~ msgid "" +#~ "Then start the Flower gRPC client " +#~ "and start communicating to the server" +#~ " by passing our Flower client to " +#~ "the function :code:`startFlwrGRPC`." +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ "call the provided :code:`MLFlwrClient` and " +#~ "call :code:`startFlwrGRPC()`. The attribute " +#~ ":code:`hostname` and :code:`port` tells the" +#~ " client which server to connect to." +#~ " This can be done by entering " +#~ "the hostname and port in the " +#~ "application before clicking the start " +#~ "button to start the federated learning" +#~ " process." +#~ msgstr "" + +#~ msgid "" +#~ "For simple workloads we can start " +#~ "a Flower server and leave all the" +#~ " configuration possibilities at their " +#~ "default values. In a file named " +#~ ":code:`server.py`, import Flower and start " +#~ "the server:" +#~ msgstr "" + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system in your ios device. The " +#~ "full `source code " +#~ "`_ for" +#~ " this example can be found in " +#~ ":code:`examples/ios`." +#~ msgstr "" + +#~ msgid "" +#~ "In this tutorial, we will learn " +#~ "how to train a :code:`Logistic " +#~ "Regression` model on MNIST using Flower" +#~ " and scikit-learn." +#~ msgstr "" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. However, before" +#~ " setting up the client and server," +#~ " we will define all functionalities " +#~ "that we need for our federated " +#~ "learning setup within :code:`utils.py`. 
The" +#~ " :code:`utils.py` contains different functions" +#~ " defining all the machine learning " +#~ "basics:" +#~ msgstr "" + +#~ msgid ":code:`get_model_parameters()`" +#~ msgstr "" + +#~ msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +#~ msgstr "" + +#~ msgid ":code:`set_model_params()`" +#~ msgstr "" + +#~ msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" +#~ msgstr "" + +#~ msgid ":code:`set_initial_params()`" +#~ msgstr "" + +#~ msgid "" +#~ "Please check out :code:`utils.py` `here " +#~ "`_ for more details. " +#~ "The pre-defined functions are used " +#~ "in the :code:`client.py` and imported. " +#~ "The :code:`client.py` also requires to " +#~ "import several packages such as Flower" +#~ " and scikit-learn:" +#~ msgstr "" + +#~ msgid "" +#~ "Prior to local training, we need " +#~ "to load the MNIST dataset, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning, " +#~ "and partition the dataset for FL. " +#~ "This can be conveniently achieved using" +#~ " `Flower Datasets `_." +#~ " The :code:`FederatedDataset.load_partition()` method" +#~ " loads the partitioned training set " +#~ "for each partition ID defined in " +#~ "the :code:`--partition-id` argument." +#~ msgstr "" + +#~ msgid "" +#~ "Next, the logistic regression model is" +#~ " defined and initialized with " +#~ ":code:`utils.set_initial_params()`." +#~ msgstr "" + +#~ msgid "" +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to fit the logistic " +#~ "regression we defined earlier)." 
+#~ msgstr "" + +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses " +#~ "scikit-learn. Implementing :code:`NumPyClient` " +#~ "usually means defining the following " +#~ "methods (:code:`set_parameters` is optional " +#~ "though):" +#~ msgstr "" + +#~ msgid ":code:`set_parameters` (optional)" +#~ msgstr "" + +#~ msgid "is directly imported with :code:`utils.set_model_params()`" +#~ msgstr "" + +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`MnistClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." +#~ msgstr "" + +#~ msgid ":code:`server.py`, import Flower and start the server:" +#~ msgstr "" + +#~ msgid "" +#~ "The number of federated learning rounds" +#~ " is set in :code:`fit_round()` and " +#~ "the evaluation is defined in " +#~ ":code:`get_evaluate_fn()`. The evaluation function" +#~ " is called after each federated " +#~ "learning round and gives you information" +#~ " about loss and accuracy. 
Note that" +#~ " we also make use of Flower " +#~ "Datasets here to load the test " +#~ "split of the MNIST dataset for " +#~ "server-side evaluation." +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`main` contains the server-" +#~ "side parameter initialization " +#~ ":code:`utils.set_initial_params()` as well as " +#~ "the aggregation strategy " +#~ ":code:`fl.server.strategy:FedAvg()`. The strategy is" +#~ " the default one, federated averaging " +#~ "(or FedAvg), with two clients and " +#~ "evaluation after each federated learning " +#~ "round. The server can be started " +#~ "with the command " +#~ ":code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +#~ "strategy=strategy, " +#~ "config=fl.server.ServerConfig(num_rounds=3))`." +#~ msgstr "" + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/sklearn-logreg-" +#~ "mnist`." +#~ msgstr "" + +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a federated XGBoost model" +#~ " on HIGGS dataset using Flower and" +#~ " :code:`xgboost` package. We use a " +#~ "simple example (`full code xgboost-" +#~ "quickstart `_) with two *clients* " +#~ "and one *server* to demonstrate how " +#~ "federated XGBoost works, and then we " +#~ "dive into a more complex example " +#~ "(`full code xgboost-comprehensive " +#~ "`_) to run various experiments." +#~ msgstr "" + +#~ msgid "" +#~ "Since we want to use :code:`xgboost` " +#~ "package to build up XGBoost trees, " +#~ "let's go ahead and install " +#~ ":code:`xgboost`:" +#~ msgstr "" + +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import xgboost, Flower, Flower Datasets " +#~ "and other related functions:" +#~ msgstr "" + +#~ msgid "" +#~ "In this example, we split the " +#~ "dataset into 30 partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=30)`)." 
+#~ " Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`partition_id`:" +#~ msgstr "" + +#~ msgid "" +#~ "After that, we do train/test splitting" +#~ " on the given partition (client's " +#~ "local data), and transform data format" +#~ " for :code:`xgboost` package." +#~ msgstr "" + +#~ msgid "" +#~ "The functions of :code:`train_test_split` and" +#~ " :code:`transform_dataset_to_dmatrix` are defined " +#~ "as below:" +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`num_local_round` represents the " +#~ "number of iterations for local tree " +#~ "boost. We use CPU for the training" +#~ " in default. One can shift it " +#~ "to GPU by setting :code:`tree_method` to" +#~ " :code:`gpu_hist`. We use AUC as " +#~ "evaluation metric." +#~ msgstr "" + +#~ msgid "" +#~ "After loading the dataset we define " +#~ "the Flower client. We follow the " +#~ "general rule to define :code:`XgbClient` " +#~ "class inherited from :code:`fl.client.Client`." +#~ msgstr "" + +#~ msgid "" +#~ "All required parameters defined above " +#~ "are passed to :code:`XgbClient`'s constructor." +#~ msgstr "" + +#~ msgid "" +#~ "Then, we override :code:`get_parameters`, " +#~ ":code:`fit` and :code:`evaluate` methods " +#~ "insides :code:`XgbClient` class as follows." +#~ msgstr "" + +#~ msgid "" +#~ "Unlike neural network training, XGBoost " +#~ "trees are not started from a " +#~ "specified random weights. In this case," +#~ " we do not use :code:`get_parameters` " +#~ "and :code:`set_parameters` to initialise model" +#~ " parameters for XGBoost. As a result," +#~ " let's return an empty tensor in " +#~ ":code:`get_parameters` when it is called " +#~ "by the server at the first round." +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. 
From " +#~ "the second round, we load the " +#~ "global model sent from server to " +#~ "new build Booster object, and then " +#~ "update model weights on local training" +#~ " data with function :code:`local_boost` as" +#~ " follows:" +#~ msgstr "" + +#~ msgid "" +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`bst_input.update` method. " +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`evaluate`, after loading the " +#~ "global model, we call :code:`bst.eval_set` " +#~ "function to conduct evaluation on valid" +#~ " set. The AUC value will be " +#~ "returned." +#~ msgstr "" + +#~ msgid "" +#~ "Now, we can create an instance of" +#~ " our class :code:`XgbClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` and " +#~ "call :code:`fl.client.start_client()`. The string" +#~ " :code:`\"[::]:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" + +#~ msgid "" +#~ "In a file named :code:`server.py`, " +#~ "import Flower and FedXgbBagging from " +#~ ":code:`flwr.server.strategy`." +#~ msgstr "" + +#~ msgid "" +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients. The :code:`config_func` function is" +#~ " to return the current FL round " +#~ "number to client's :code:`fit()` and " +#~ ":code:`evaluate()` methods." 
+#~ msgstr "" + +#~ msgid "" +#~ "In file :code:`flwr.server.strategy.fedxgb_bagging.py`," +#~ " we define :code:`FedXgbBagging` inherited " +#~ "from :code:`flwr.server.strategy.FedAvg`. Then, we" +#~ " override the :code:`aggregate_fit`, " +#~ ":code:`aggregate_evaluate` and :code:`evaluate` " +#~ "methods as follows:" +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`aggregate_fit`, we sequentially " +#~ "aggregate the clients' XGBoost trees by" +#~ " calling :code:`aggregate()` function:" +#~ msgstr "" + +#~ msgid "" +#~ "In this function, we first fetch " +#~ "the number of trees and the number" +#~ " of parallel trees for the current" +#~ " and previous model by calling " +#~ ":code:`_get_tree_nums`. Then, the fetched " +#~ "information will be aggregated. After " +#~ "that, the trees (containing model " +#~ "weights) are aggregated to generate a" +#~ " new tree model." +#~ msgstr "" + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated XGBoost " +#~ "system. The AUC values can be " +#~ "checked in :code:`metrics_distributed`. One " +#~ "can see that the average AUC " +#~ "increases over FL rounds." +#~ msgstr "" + +#~ msgid "" +#~ "The full `source code " +#~ "`_ for this example can be" +#~ " found in :code:`examples/xgboost-quickstart`." +#~ msgstr "" + +#~ msgid "" +#~ "To do this, we first customise a" +#~ " :code:`ClientManager` in :code:`server_utils.py`:" +#~ msgstr "" + +#~ msgid "" +#~ "The customised :code:`ClientManager` samples " +#~ "all available clients in each FL " +#~ "round based on the order of " +#~ "connection to the server. Then, we " +#~ "define a new strategy :code:`FedXgbCyclic` " +#~ "in :code:`flwr.server.strategy.fedxgb_cyclic.py`, in " +#~ "order to sequentially select only one" +#~ " client in given round and pass " +#~ "the received model to next client." +#~ msgstr "" + +#~ msgid "" +#~ "Unlike the original :code:`FedAvg`, we " +#~ "don't perform aggregation here. 
Instead, " +#~ "we just make a copy of the " +#~ "received client model as global model" +#~ " by overriding :code:`aggregate_fit`." +#~ msgstr "" + +#~ msgid "" +#~ "Also, the customised :code:`configure_fit` and" +#~ " :code:`configure_evaluate` methods ensure the" +#~ " clients to be sequentially selected " +#~ "given FL round:" +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`dataset.py`, we have a " +#~ "function :code:`instantiate_partitioner` to " +#~ "instantiate the data partitioner based " +#~ "on the given :code:`num_partitions` and " +#~ ":code:`partitioner_type`. Currently, we provide " +#~ "four supported partitioner type to " +#~ "simulate the uniformity/non-uniformity in " +#~ "data quantity (uniform, linear, square, " +#~ "exponential)." +#~ msgstr "" + +#~ msgid "" +#~ "To facilitate centralised evaluation, we " +#~ "define a function in :code:`server_utils.py`:" +#~ msgstr "" + +#~ msgid "" +#~ "This function returns a evaluation " +#~ "function which instantiates a :code:`Booster`" +#~ " object and loads the global model" +#~ " weights to it. The evaluation is " +#~ "conducted by calling :code:`eval_set()` " +#~ "method, and the tested AUC value " +#~ "is reported." +#~ msgstr "" + +#~ msgid "" +#~ "As for distributed evaluation on the " +#~ "clients, it's same as the quick-" +#~ "start example by overriding the " +#~ ":code:`evaluate()` method insides the " +#~ ":code:`XgbClient` class in :code:`client_utils.py`." +#~ msgstr "" + +#~ msgid "" +#~ "We also provide an example code " +#~ "(:code:`sim.py`) to use the simulation " +#~ "capabilities of Flower to simulate " +#~ "federated XGBoost training on either a" +#~ " single machine or a cluster of " +#~ "machines." 
+#~ msgstr "" + +#~ msgid "" +#~ "After importing all required packages, " +#~ "we define a :code:`main()` function to" +#~ " perform the simulation process:" +#~ msgstr "" + +#~ msgid "" +#~ "We first load the dataset and " +#~ "perform data partitioning, and the " +#~ "pre-processed data is stored in a " +#~ ":code:`list`. After the simulation begins, " +#~ "the clients won't need to pre-" +#~ "process their partitions again." +#~ msgstr "" + +#~ msgid "" +#~ "After that, we start the simulation " +#~ "by calling :code:`fl.simulation.start_simulation`:" +#~ msgstr "" + +#~ msgid "" +#~ "One of key parameters for " +#~ ":code:`start_simulation` is :code:`client_fn` which" +#~ " returns a function to construct a" +#~ " client. We define it as follows:" +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`utils.py`, we define the " +#~ "arguments parsers for clients, server " +#~ "and simulation, allowing users to " +#~ "specify different experimental settings. Let's" +#~ " first see the sever side:" +#~ msgstr "" + +#~ msgid "" +#~ "This allows user to specify training " +#~ "strategies / the number of total " +#~ "clients / FL rounds / participating " +#~ "clients / clients for evaluation, and" +#~ " evaluation fashion. Note that with " +#~ ":code:`--centralised-eval`, the sever will " +#~ "do centralised evaluation and all " +#~ "functionalities for client evaluation will " +#~ "be disabled." +#~ msgstr "" + +#~ msgid "" +#~ "This defines various options for client" +#~ " data partitioning. Besides, clients also" +#~ " have an option to conduct evaluation" +#~ " on centralised test set by setting" +#~ " :code:`--centralised-eval`, as well as " +#~ "an option to perform scaled learning " +#~ "rate based on the number of " +#~ "clients by setting :code:`--scaled-lr`." +#~ msgstr "" + +#~ msgid "" +#~ "The full `code " +#~ "`_ for this comprehensive " +#~ "example can be found in :code:`examples" +#~ "/xgboost-comprehensive`." 
+#~ msgstr "" + +#~ msgid "|b8714c45b74b4d8fb008e2ebb3bc1d44|" +#~ msgstr "" + +#~ msgid "|75f1561efcfd422ea67d28d1513120dc|" +#~ msgstr "" + +#~ msgid "|6a1f51b235304558a9bdaaabfc93b8d2|" +#~ msgstr "" + +#~ msgid "|35e70dab1fb544af9aa3a9c09c4f9797|" +#~ msgstr "" + +#~ msgid "|d7efb5705dd3467f991ed23746824a07|" +#~ msgstr "" + +#~ msgid "|94e7b021c7b540bfbedf7f082a41ff87|" +#~ msgstr "" + +#~ msgid "|a80714782dde439ab73936518f91fc3c|" +#~ msgstr "" + +#~ msgid "|c62080ca6197473da57d191c8225a9d9|" +#~ msgstr "" + +#~ msgid "|21a8f1e6a5b14a7bbb8559979d0e8a2b|" +#~ msgstr "" + +#~ msgid "|c310f2a22f7b4917bf42775aae7a1c09|" +#~ msgstr "" + +#~ msgid "|a0c5b43401194535a8460bcf02e65f9a|" +#~ msgstr "" + +#~ msgid "|aabfdbd5564e41a790f8ea93cc21a444|" +#~ msgstr "" + +#~ msgid "|c9cc8f160fa647b09e742fe4dc8edb54|" +#~ msgstr "" + +#~ msgid "|7e83aad011cd4907b2f02f907c6922e9|" +#~ msgstr "" + +#~ msgid "|4627c2bb6cc443ae9e079f81f33c9dd9|" +#~ msgstr "" + +#~ msgid "|131af8322dc5466b827afd24be98f8c0|" +#~ msgstr "" + +#~ msgid "|f92920b87f3a40179bf7ddd0b6144c53|" +#~ msgstr "" + +#~ msgid "|d62da263071d45a496f543e41fce3a19|" +#~ msgstr "" + +#~ msgid "|ad851971645b4e1fbf8d15bcc0b2ee11|" +#~ msgstr "" + +#~ msgid "|929e9a6de6b34edb8488e644e2bb5221|" +#~ msgstr "" + +#~ msgid "|404cf9c9e8d64784a55646c0f9479cbc|" +#~ msgstr "" + +#~ msgid "|b021ff9d25814458b1e631f8985a648b|" +#~ msgstr "" + +#~ msgid "|e6ca84e1df244f238288a768352678e5|" +#~ msgstr "" + +#~ msgid "|39c2422082554a21963baffb33a0d057|" +#~ msgstr "" + +#~ msgid "|07ecf5fcd6814e88906accec6fa0fbfb|" +#~ msgstr "" + +#~ msgid "|57e78c0ca8a94ba5a64a04b1f2280e55|" +#~ msgstr "" + +#~ msgid "|9819b40e59ee40a4921e1244e8c99bac|" +#~ msgstr "" + +#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|" +#~ msgstr "" + diff --git a/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po b/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po index 44223940cdce..9c7a59d09008 100644 --- a/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po +++ 
b/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-09-24 00:29+0000\n" +"POT-Creation-Date: 2024-09-27 00:30+0000\n" "PO-Revision-Date: 2024-05-25 11:09+0000\n" "Last-Translator: Gustavo Bertoli \n" "Language: pt_BR\n" @@ -62,22 +62,22 @@ msgid "" "or not by reading the Flower source code." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:22 +#: ../../source/contributor-explanation-public-and-private-apis.rst:23 msgid "Flower public API" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:24 +#: ../../source/contributor-explanation-public-and-private-apis.rst:25 msgid "Flower has a well-defined public API. Let's look at this in more detail." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:28 +#: ../../source/contributor-explanation-public-and-private-apis.rst:29 msgid "" "Every component that is reachable by recursively following " "``__init__.__all__`` starting from the root package (``flwr``) is part of" " the public API." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:30 +#: ../../source/contributor-explanation-public-and-private-apis.rst:32 msgid "" "If you want to determine whether a component " "(class/function/generator/...) 
is part of the public API or not, you need" @@ -85,13 +85,13 @@ msgid "" "src/py/flwr`` to look at the Python sub-packages contained ``flwr``:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:43 +#: ../../source/contributor-explanation-public-and-private-apis.rst:46 msgid "" "Contrast this with the definition of ``__all__`` in the root " "``src/py/flwr/__init__.py``:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:55 +#: ../../source/contributor-explanation-public-and-private-apis.rst:59 msgid "" "You can see that ``flwr`` has six subpackages (``cli``, ``client``, " "``common``, ``proto``, ``server``, ``simulation``), but only four of them" @@ -99,7 +99,7 @@ msgid "" "``simulation``)." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:57 +#: ../../source/contributor-explanation-public-and-private-apis.rst:63 msgid "" "What does this mean? It means that ``client``, ``common``, ``server`` and" " ``simulation`` are part of the public API, but ``cli`` and ``proto`` are" @@ -110,21 +110,21 @@ msgid "" "even be removed completely." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:62 +#: ../../source/contributor-explanation-public-and-private-apis.rst:70 msgid "Therefore, as a Flower user:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:64 +#: ../../source/contributor-explanation-public-and-private-apis.rst:72 msgid "``from flwr import client`` ✅ Ok, you're importing a public API." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:65 +#: ../../source/contributor-explanation-public-and-private-apis.rst:73 msgid "" "``from flwr import proto`` ❌ Not recommended, you're importing a private " "API." 
msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:67 +#: ../../source/contributor-explanation-public-and-private-apis.rst:75 msgid "" "What about components that are nested deeper in the hierarchy? Let's look" " at Flower strategies to see another typical pattern. Flower strategies " @@ -133,7 +133,7 @@ msgid "" "``src/py/flwr/server/strategy/__init__.py``:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:81 +#: ../../source/contributor-explanation-public-and-private-apis.rst:91 msgid "" "What's notable here is that all strategies are implemented in dedicated " "modules (e.g., ``fedavg.py``). In ``__init__.py``, we *import* the " @@ -145,33 +145,33 @@ msgid "" "the public API (as long as we update the import path in ``__init__.py``)." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:86 +#: ../../source/contributor-explanation-public-and-private-apis.rst:99 msgid "Therefore:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:88 +#: ../../source/contributor-explanation-public-and-private-apis.rst:101 msgid "" "``from flwr.server.strategy import FedAvg`` ✅ Ok, you're importing a " "class that is part of the public API." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:89 +#: ../../source/contributor-explanation-public-and-private-apis.rst:103 msgid "" "``from flwr.server.strategy import fedavg`` ❌ Not recommended, you're " "importing a private module." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:91 +#: ../../source/contributor-explanation-public-and-private-apis.rst:106 msgid "" "This approach is also implemented in the tooling that automatically " "builds API reference docs." 
msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:94 +#: ../../source/contributor-explanation-public-and-private-apis.rst:110 msgid "Flower public API of private packages" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:96 +#: ../../source/contributor-explanation-public-and-private-apis.rst:112 msgid "" "We also use this to define the public API of private subpackages. Public," " in this context, means the API that other ``flwr`` subpackages should " @@ -179,14 +179,14 @@ msgid "" "not exported via ``src/py/flwr/server/__init__.py``'s ``__all__``)." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:100 +#: ../../source/contributor-explanation-public-and-private-apis.rst:117 msgid "" "Still, the private sub-package ``flwr.server.driver`` defines a " "\"public\" API using ``__all__`` in " "``src/py/flwr/server/driver/__init__.py``:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:114 +#: ../../source/contributor-explanation-public-and-private-apis.rst:132 msgid "" "The interesting part is that both ``GrpcDriver`` and ``InMemoryDriver`` " "are never used by Flower framework users, only by other parts of the " @@ -198,7 +198,7 @@ msgid "" "``InMemoryDriver`` class definition)." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:117 +#: ../../source/contributor-explanation-public-and-private-apis.rst:140 msgid "" "This is because ``flwr.server.driver`` defines a public interface for " "other ``flwr`` subpackages. This allows codeowners of " @@ -237,16 +237,16 @@ msgstr "" "Antes de começarmos, precisamos encontrar alguns pré-requisitos em nosso " "ambiente de desenvolvimento local." -#: ../../source/contributor-how-to-build-docker-images.rst:12 +#: ../../source/contributor-how-to-build-docker-images.rst:13 #, fuzzy msgid "Clone the ``flower`` repository." msgstr "Clone o repositório do flower." 
-#: ../../source/contributor-how-to-build-docker-images.rst:18 +#: ../../source/contributor-how-to-build-docker-images.rst:19 msgid "Verify the Docker daemon is running." msgstr "Verifique que o serviço Docker está rodando." -#: ../../source/contributor-how-to-build-docker-images.rst:20 +#: ../../source/contributor-how-to-build-docker-images.rst:21 msgid "" "The build instructions that assemble the images are located in the " "respective Dockerfiles. You can find them in the subdirectories of " @@ -256,7 +256,7 @@ msgstr "" "respectivos Dockerfiles. Você pode encontrá-los nos subdiretórios " "``src/docker```." -#: ../../source/contributor-how-to-build-docker-images.rst:23 +#: ../../source/contributor-how-to-build-docker-images.rst:24 #, fuzzy msgid "" "Flower Docker images are configured via build arguments. Through build " @@ -276,146 +276,146 @@ msgstr "" "Todos os argumentos de compilação disponíveis para cada imagem estão " "listados em uma das tabelas abaixo." -#: ../../source/contributor-how-to-build-docker-images.rst:30 +#: ../../source/contributor-how-to-build-docker-images.rst:32 #, fuzzy msgid "Building the Base Image" msgstr "Construindo a imagem base" -#: ../../source/contributor-how-to-build-docker-images.rst:36 -#: ../../source/contributor-how-to-build-docker-images.rst:98 +#: ../../source/contributor-how-to-build-docker-images.rst:38 +#: ../../source/contributor-how-to-build-docker-images.rst:104 msgid "Build argument" msgstr "Argumento de compilação" -#: ../../source/contributor-how-to-build-docker-images.rst:37 -#: ../../source/contributor-how-to-build-docker-images.rst:99 +#: ../../source/contributor-how-to-build-docker-images.rst:39 +#: ../../source/contributor-how-to-build-docker-images.rst:105 msgid "Description" msgstr "Descrição" -#: ../../source/contributor-how-to-build-docker-images.rst:38 -#: ../../source/contributor-how-to-build-docker-images.rst:100 +#: ../../source/contributor-how-to-build-docker-images.rst:40 +#: 
../../source/contributor-how-to-build-docker-images.rst:106 msgid "Required" msgstr "Necessário" -#: ../../source/contributor-how-to-build-docker-images.rst:39 -#: ../../source/contributor-how-to-build-docker-images.rst:101 -#: ../../source/docker/persist-superlink-state.rst:18 -#: ../../source/docker/pin-version.rst:11 +#: ../../source/contributor-how-to-build-docker-images.rst:41 +#: ../../source/contributor-how-to-build-docker-images.rst:107 +#: ../../source/docker/persist-superlink-state.rst:19 +#: ../../source/docker/pin-version.rst:12 #: ../../source/docker/set-environment-variables.rst:8 msgid "Example" msgstr "Exemplo" -#: ../../source/contributor-how-to-build-docker-images.rst:40 +#: ../../source/contributor-how-to-build-docker-images.rst:42 msgid "``DISTRO``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:41 +#: ../../source/contributor-how-to-build-docker-images.rst:43 #, fuzzy msgid "The Linux distribution to use as the base image." msgstr "O nome do repositório da imagem base." 
-#: ../../source/contributor-how-to-build-docker-images.rst:42 -#: ../../source/contributor-how-to-build-docker-images.rst:46 -#: ../../source/contributor-how-to-build-docker-images.rst:50 -#: ../../source/contributor-how-to-build-docker-images.rst:66 -#: ../../source/contributor-how-to-build-docker-images.rst:70 -#: ../../source/contributor-how-to-build-docker-images.rst:104 +#: ../../source/contributor-how-to-build-docker-images.rst:44 +#: ../../source/contributor-how-to-build-docker-images.rst:48 +#: ../../source/contributor-how-to-build-docker-images.rst:52 +#: ../../source/contributor-how-to-build-docker-images.rst:68 +#: ../../source/contributor-how-to-build-docker-images.rst:75 +#: ../../source/contributor-how-to-build-docker-images.rst:110 msgid "No" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:43 +#: ../../source/contributor-how-to-build-docker-images.rst:45 #, fuzzy msgid "``ubuntu``" msgstr "``UBUNTU_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:44 +#: ../../source/contributor-how-to-build-docker-images.rst:46 #, fuzzy msgid "``DISTRO_VERSION``" msgstr "``PIP_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:45 +#: ../../source/contributor-how-to-build-docker-images.rst:47 msgid "Version of the Linux distribution." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:47 +#: ../../source/contributor-how-to-build-docker-images.rst:49 msgid ":substitution-code:`|ubuntu_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:48 +#: ../../source/contributor-how-to-build-docker-images.rst:50 msgid "``PYTHON_VERSION``" msgstr "``PYTHON_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:49 +#: ../../source/contributor-how-to-build-docker-images.rst:51 msgid "Version of ``python`` to be installed." msgstr "Versão do ``python`` a ser instalada." 
-#: ../../source/contributor-how-to-build-docker-images.rst:51 +#: ../../source/contributor-how-to-build-docker-images.rst:53 msgid "``3.11`` or ``3.11.1``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:52 +#: ../../source/contributor-how-to-build-docker-images.rst:54 msgid "``PIP_VERSION``" msgstr "``PIP_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:53 +#: ../../source/contributor-how-to-build-docker-images.rst:55 msgid "Version of ``pip`` to be installed." msgstr "Versão do ``pip`` a ser instalada." -#: ../../source/contributor-how-to-build-docker-images.rst:54 -#: ../../source/contributor-how-to-build-docker-images.rst:58 -#: ../../source/contributor-how-to-build-docker-images.rst:62 -#: ../../source/contributor-how-to-build-docker-images.rst:108 +#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:60 +#: ../../source/contributor-how-to-build-docker-images.rst:64 +#: ../../source/contributor-how-to-build-docker-images.rst:114 msgid "Yes" msgstr "Sim" -#: ../../source/contributor-how-to-build-docker-images.rst:55 +#: ../../source/contributor-how-to-build-docker-images.rst:57 msgid ":substitution-code:`|pip_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:58 msgid "``SETUPTOOLS_VERSION``" msgstr "``SETUPTOOLS_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:57 +#: ../../source/contributor-how-to-build-docker-images.rst:59 msgid "Version of ``setuptools`` to be installed." msgstr "Versão do ``setuptools`` a ser instalada." 
-#: ../../source/contributor-how-to-build-docker-images.rst:59 +#: ../../source/contributor-how-to-build-docker-images.rst:61 #, fuzzy msgid ":substitution-code:`|setuptools_version|`" msgstr "``SETUPTOOLS_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:60 +#: ../../source/contributor-how-to-build-docker-images.rst:62 msgid "``FLWR_VERSION``" msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:61 +#: ../../source/contributor-how-to-build-docker-images.rst:63 msgid "Version of Flower to be installed." msgstr "Versão do Flower a ser instalada." -#: ../../source/contributor-how-to-build-docker-images.rst:63 +#: ../../source/contributor-how-to-build-docker-images.rst:65 msgid ":substitution-code:`|stable_flwr_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:64 +#: ../../source/contributor-how-to-build-docker-images.rst:66 #, fuzzy msgid "``FLWR_PACKAGE``" msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:65 +#: ../../source/contributor-how-to-build-docker-images.rst:67 #, fuzzy msgid "The Flower package to be installed." msgstr "Versão do Flower a ser instalada." -#: ../../source/contributor-how-to-build-docker-images.rst:67 +#: ../../source/contributor-how-to-build-docker-images.rst:69 msgid "``flwr`` or ``flwr-nightly``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:68 +#: ../../source/contributor-how-to-build-docker-images.rst:70 #, fuzzy msgid "``FLWR_VERSION_REF``" msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:69 +#: ../../source/contributor-how-to-build-docker-images.rst:71 msgid "" "A `direct reference " "`_." -#: ../../source/contributor-how-to-contribute-translations.rst:29 +#: ../../source/contributor-how-to-contribute-translations.rst:28 msgid "" "Once you are signed in to Weblate, you can navigate to the `Flower " "Framework project `_. 
Aqui, você deve ver os diferentes idiomas existentes " "que podem ser encontrados no site." -#: ../../source/contributor-how-to-contribute-translations.rst:34 +#: ../../source/contributor-how-to-contribute-translations.rst:32 msgid "" "Once you have selected the language you want to contribute to, you should" " see a similar interface to this:" @@ -585,7 +585,7 @@ msgstr "" "Uma vez que você tenha selecionado o idioma que deseja contribuir, você " "deve ver uma interface semelhante a esta:" -#: ../../source/contributor-how-to-contribute-translations.rst:39 +#: ../../source/contributor-how-to-contribute-translations.rst:37 msgid "" "The most straight forward option here is to click on the ``Translate`` " "button on the top right (in the ``Translation status`` section). This " @@ -597,12 +597,12 @@ msgstr "" "automaticamente para a interface de tradução de strings ainda não " "traduzidas." -#: ../../source/contributor-how-to-contribute-translations.rst:43 +#: ../../source/contributor-how-to-contribute-translations.rst:41 #, fuzzy msgid "This is what the interface looks like:" msgstr "É assim que a interface se parece:" -#: ../../source/contributor-how-to-contribute-translations.rst:47 +#: ../../source/contributor-how-to-contribute-translations.rst:45 msgid "" "You input your translation in the text box at the top and then, once you " "are happy with it, you either press ``Save and continue`` (to save the " @@ -619,7 +619,7 @@ msgstr "" "ou ``Skip`` (para ir para a próxima string não traduzida sem salvar nada " "na atual)." -#: ../../source/contributor-how-to-contribute-translations.rst:54 +#: ../../source/contributor-how-to-contribute-translations.rst:51 msgid "" "In order to help with the translations, you can see on the bottom the " "``Nearby strings``, the ``Comments`` (from other contributors), the " @@ -634,7 +634,7 @@ msgstr "" "(outras línguas), e o ``History`` (histórico) de traduções para esta " "string." 
-#: ../../source/contributor-how-to-contribute-translations.rst:59 +#: ../../source/contributor-how-to-contribute-translations.rst:56 msgid "" "On the right, under the ``String information`` section, you can also " "click the link under ``Source string location`` in order to view the " @@ -644,7 +644,7 @@ msgstr "" " link sob ``Source string location`` para visualizar a fonte do arquivo " "doc que contém a string." -#: ../../source/contributor-how-to-contribute-translations.rst:63 +#: ../../source/contributor-how-to-contribute-translations.rst:60 msgid "" "For more information about translating using Weblate, you can check out " "this `in-depth guide " @@ -654,12 +654,12 @@ msgstr "" "pode conferir este `guia detalhado " "`_." -#: ../../source/contributor-how-to-contribute-translations.rst:67 +#: ../../source/contributor-how-to-contribute-translations.rst:64 #, fuzzy msgid "Add new languages" msgstr "Adicionar novos idiomas" -#: ../../source/contributor-how-to-contribute-translations.rst:69 +#: ../../source/contributor-how-to-contribute-translations.rst:66 msgid "" "If you want to add a new language, you will first have to contact us, " "either on `Slack `_, or by opening an issue" @@ -682,20 +682,19 @@ msgid "" "extension. What is it? Read the following quote:" msgstr "" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:7 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:8 msgid "" "The Visual Studio Code Remote - Containers extension lets you use a " "Docker container as a fully-featured development environment. It allows " "you to open any folder inside (or mounted into) a container and take " "advantage of Visual Studio Code's full feature set. A " -":code:`devcontainer.json` file in your project tells VS Code how to " -"access (or create) a development container with a well-defined tool and " -"runtime stack. 
This container can be used to run an application or to " -"separate tools, libraries, or runtimes needed for working with a " -"codebase." +"``devcontainer.json`` file in your project tells VS Code how to access " +"(or create) a development container with a well-defined tool and runtime " +"stack. This container can be used to run an application or to separate " +"tools, libraries, or runtimes needed for working with a codebase." msgstr "" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:9 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:16 msgid "" "Workspace files are mounted from the local file system or copied or " "cloned into the container. Extensions are installed and run inside the " @@ -704,28 +703,28 @@ msgid "" " environment just by connecting to a different container." msgstr "" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:11 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:22 msgid "" "Source: `Official VSCode documentation " "`_" msgstr "" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:15 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:26 msgid "Getting started" msgstr "" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:17 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:28 msgid "" -"Configuring and setting up the :code:`Dockerfile` as well the " -"configuration for the devcontainer can be a bit more involved. The good " -"thing is you don't have to do it. Usually it should be enough to install " -"`Docker `_ on your system and " -"ensure its available on your command line. Additionally, install the " -"`VSCode Containers Extension `_ on your system and ensure its" +" available on your command line. Additionally, install the `VSCode " +"Containers Extension `_." 
msgstr "" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:19 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:35 msgid "" "Now you should be good to go. When starting VSCode, it will ask you to " "run in the container environment and - if you confirm - automatically " @@ -735,20 +734,20 @@ msgid "" "option *(Re)Open Folder in Container*." msgstr "" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:21 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:41 msgid "" "In some cases your setup might be more involved. For those cases consult " "the following sources:" msgstr "" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:23 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:44 msgid "" "`Developing inside a Container " "`_" msgstr "" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:24 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:46 msgid "" "`Remote development in Containers " "`_" @@ -773,154 +772,154 @@ msgid "" "``poetry.lock`` (``rm poetry.lock``) before running ``poetry install``)." 
msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:12 +#: ../../source/contributor-how-to-install-development-versions.rst:14 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (without " "extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:13 +#: ../../source/contributor-how-to-install-development-versions.rst:15 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true, extras = " "[\"simulation\"] }`` (with extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:15 +#: ../../source/contributor-how-to-install-development-versions.rst:18 msgid "" "Install ``flwr`` from a local copy of the Flower source code via " "``pyproject.toml``:" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:17 +#: ../../source/contributor-how-to-install-development-versions.rst:20 msgid "``flwr = { path = \"../../\", develop = true }`` (without extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:18 +#: ../../source/contributor-how-to-install-development-versions.rst:21 msgid "" "``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " "}`` (with extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:20 +#: ../../source/contributor-how-to-install-development-versions.rst:23 msgid "Install ``flwr`` from a local wheel file via ``pyproject.toml``:" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:22 +#: ../../source/contributor-how-to-install-development-versions.rst:25 msgid "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (without" " extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:23 +#: ../../source/contributor-how-to-install-development-versions.rst:26 msgid "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\", extras = " "[\"simulation\"] }`` 
(with extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:25 +#: ../../source/contributor-how-to-install-development-versions.rst:29 msgid "" "Please refer to the Poetry documentation for further details: `Poetry " "Dependency Specification `_" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:28 +#: ../../source/contributor-how-to-install-development-versions.rst:33 msgid "Using pip (recommended on Colab)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:30 +#: ../../source/contributor-how-to-install-development-versions.rst:35 msgid "Install a ``flwr`` pre-release from PyPI:" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:32 +#: ../../source/contributor-how-to-install-development-versions.rst:37 msgid "``pip install -U --pre flwr`` (without extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:33 +#: ../../source/contributor-how-to-install-development-versions.rst:38 msgid "``pip install -U --pre 'flwr[simulation]'`` (with extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:35 +#: ../../source/contributor-how-to-install-development-versions.rst:40 msgid "" "Python packages can be installed from git repositories. Use one of the " "following commands to install the Flower directly from GitHub." 
msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:37 +#: ../../source/contributor-how-to-install-development-versions.rst:43 msgid "Install ``flwr`` from the default GitHub branch (``main``):" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:39 +#: ../../source/contributor-how-to-install-development-versions.rst:45 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git`` (without " "extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:40 +#: ../../source/contributor-how-to-install-development-versions.rst:46 msgid "" "``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" " (with extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:42 +#: ../../source/contributor-how-to-install-development-versions.rst:49 msgid "Install ``flwr`` from a specific GitHub branch (``branch-name``):" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:44 +#: ../../source/contributor-how-to-install-development-versions.rst:51 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " "(without extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:45 +#: ../../source/contributor-how-to-install-development-versions.rst:53 msgid "" "``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git" "@branch-name'`` (with extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:49 +#: ../../source/contributor-how-to-install-development-versions.rst:57 msgid "Open Jupyter Notebooks on Google Colab" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:51 +#: ../../source/contributor-how-to-install-development-versions.rst:59 msgid "" "Open the notebook ``doc/source/tutorial-series-get-started-with-flower-" "pytorch.ipynb``:" msgstr "" -#: 
../../source/contributor-how-to-install-development-versions.rst:53 +#: ../../source/contributor-how-to-install-development-versions.rst:61 msgid "" "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" "/tutorial-series-get-started-with-flower-pytorch.ipynb" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:55 +#: ../../source/contributor-how-to-install-development-versions.rst:63 msgid "" "Open a development version of the same notebook from branch `branch-name`" " by changing ``main`` to ``branch-name`` (right after ``blob``):" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:57 +#: ../../source/contributor-how-to-install-development-versions.rst:66 msgid "" "https://colab.research.google.com/github/adap/flower/blob/branch-" "name/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:59 +#: ../../source/contributor-how-to-install-development-versions.rst:68 msgid "Install a `whl` on Google Colab:" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:61 +#: ../../source/contributor-how-to-install-development-versions.rst:70 msgid "" "In the vertical icon grid on the left hand side, select ``Files`` > " "``Upload to session storage``" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:62 +#: ../../source/contributor-how-to-install-development-versions.rst:72 msgid "Upload the whl (e.g., ``flwr-1.8.0-py3-none-any.whl``)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:63 +#: ../../source/contributor-how-to-install-development-versions.rst:73 msgid "" "Change ``!pip install -q 'flwr[simulation]' torch torchvision " "matplotlib`` to ``!pip install -q 'flwr-1.8.0-py3-none-" @@ -937,25 +936,25 @@ msgid "" "change in the future." 
msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:7 +#: ../../source/contributor-how-to-release-flower.rst:8 msgid "During the release" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:9 +#: ../../source/contributor-how-to-release-flower.rst:10 msgid "" "The version number of a release is stated in ``pyproject.toml``. To " "release a new version of Flower, the following things need to happen (in " "that order):" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:11 +#: ../../source/contributor-how-to-release-flower.rst:13 msgid "" "Run ``python3 src/py/flwr_tool/update_changelog.py `` in " "order to add every new change to the changelog (feel free to make manual " "changes to the changelog afterwards until it looks good)." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:12 +#: ../../source/contributor-how-to-release-flower.rst:16 msgid "" "Once the changelog has been updated with all the changes, run ``./dev" "/prepare-release-changelog.sh v``, where ```` " @@ -965,7 +964,7 @@ msgid "" "the contributors. Open a pull request with those changes." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:13 +#: ../../source/contributor-how-to-release-flower.rst:22 msgid "" "Once the pull request is merged, tag the release commit with the version " "number as soon as the PR is merged: ``git tag v`` (notice " @@ -974,100 +973,100 @@ msgid "" "artifacts and the relevant part of the changelog." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:14 +#: ../../source/contributor-how-to-release-flower.rst:26 msgid "Check the draft release on GitHub, and if everything is good, publish it." 
msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:17 +#: ../../source/contributor-how-to-release-flower.rst:29 msgid "After the release" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:19 +#: ../../source/contributor-how-to-release-flower.rst:31 msgid "Create a pull request which contains the following changes:" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:21 +#: ../../source/contributor-how-to-release-flower.rst:33 msgid "Increase the minor version in ``pyproject.toml`` by one." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:22 +#: ../../source/contributor-how-to-release-flower.rst:34 msgid "Update all files which contain the current version number if necessary." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:23 +#: ../../source/contributor-how-to-release-flower.rst:35 msgid "Add a new ``Unreleased`` section in ``changelog.md``." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:25 +#: ../../source/contributor-how-to-release-flower.rst:37 msgid "" "Merge the pull request on the same day (i.e., before a new nightly " "release gets published to PyPI)." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:28 +#: ../../source/contributor-how-to-release-flower.rst:41 msgid "Publishing a pre-release" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:31 +#: ../../source/contributor-how-to-release-flower.rst:44 msgid "Pre-release naming" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:33 +#: ../../source/contributor-how-to-release-flower.rst:46 msgid "" "PyPI supports pre-releases (alpha, beta, release candidate). 
Pre-releases" " MUST use one of the following naming patterns:" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:35 +#: ../../source/contributor-how-to-release-flower.rst:49 msgid "Alpha: ``MAJOR.MINOR.PATCHaN``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:36 +#: ../../source/contributor-how-to-release-flower.rst:50 msgid "Beta: ``MAJOR.MINOR.PATCHbN``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:37 +#: ../../source/contributor-how-to-release-flower.rst:51 msgid "Release candidate (RC): ``MAJOR.MINOR.PATCHrcN``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:39 +#: ../../source/contributor-how-to-release-flower.rst:53 msgid "Examples include:" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:41 +#: ../../source/contributor-how-to-release-flower.rst:55 msgid "``1.0.0a0``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:42 +#: ../../source/contributor-how-to-release-flower.rst:56 msgid "``1.0.0b0``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:43 +#: ../../source/contributor-how-to-release-flower.rst:57 msgid "``1.0.0rc0``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:44 +#: ../../source/contributor-how-to-release-flower.rst:58 msgid "``1.0.0rc1``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:46 +#: ../../source/contributor-how-to-release-flower.rst:60 msgid "" "This is in line with PEP-440 and the recommendations from the Python " "Packaging Authority (PyPA):" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:49 +#: ../../source/contributor-how-to-release-flower.rst:63 msgid "`PEP-440 `_" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:50 +#: ../../source/contributor-how-to-release-flower.rst:64 msgid "" "`PyPA Choosing a versioning scheme " "`_" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:52 +#: 
../../source/contributor-how-to-release-flower.rst:67 msgid "" "Note that the approach defined by PyPA is not compatible with SemVer " "2.0.0 spec, for details consult the `Semantic Versioning Specification " @@ -1075,26 +1074,26 @@ msgid "" "11 on precedence)." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:55 +#: ../../source/contributor-how-to-release-flower.rst:73 msgid "Pre-release classification" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:57 +#: ../../source/contributor-how-to-release-flower.rst:75 msgid "Should the next pre-release be called alpha, beta, or release candidate?" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:59 +#: ../../source/contributor-how-to-release-flower.rst:77 msgid "" "RC: feature complete, no known issues (apart from issues that are " "classified as \"won't fix\" for the next stable release) - if no issues " "surface this will become the next stable release" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:60 +#: ../../source/contributor-how-to-release-flower.rst:80 msgid "Beta: feature complete, allowed to have known issues" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:61 +#: ../../source/contributor-how-to-release-flower.rst:81 msgid "Alpha: not feature complete, allowed to have known issues" msgstr "" @@ -1110,19 +1109,19 @@ msgid "" "the instructions or choose your preferred setup." msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:9 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:10 msgid "Python Version" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:11 -#: ../../source/how-to-install-flower.rst:8 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:12 +#: ../../source/how-to-install-flower.rst:7 msgid "" "Flower requires at least `Python 3.9 `_, " "but `Python 3.10 `_ or above is " "recommended." 
msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:14 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:17 msgid "" "Due to a known incompatibility with `ray " "`_, we currently recommend utilizing at " @@ -1130,11 +1129,11 @@ msgid "" "simulations." msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:19 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:22 msgid "Virtualenv with Pyenv/Virtualenv" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:21 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:24 msgid "" "One of the recommended virtual environment is `pyenv " "`_/`virtualenv `_ for details." msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:23 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 msgid "" "Once Pyenv is set up, you can use it to install `Python Version 3.10 " "`_ or above:" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 msgid "Create the virtualenv with:" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:42 msgid "Activate the virtualenv by running the following command:" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:44 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:49 msgid "Virtualenv with Poetry" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:46 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:51 msgid "" "The Flower examples are based on `Poetry `_ to manage dependencies. 
After installing Poetry you " "simply create a virtual environment with:" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:52 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:58 msgid "" "If you open a new terminal you can activate the previously created " "virtual environment with the following command:" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:60 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:66 msgid "Virtualenv with Anaconda" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:62 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 msgid "" "If you prefer to use Anaconda for your virtual environment then install " "and setup the `conda `_ package. After setting it up you can " +"/user-guide/install/index.html>`_ package. After setting it up you can " "create a virtual environment with:" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 msgid "and activate the virtual environment with:" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:83 msgid "And then?" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:78 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:85 msgid "" "As soon as you created your virtual environment you clone one of the " "`Flower examples `_." @@ -1203,11 +1202,11 @@ msgstr "" msgid "Write documentation" msgstr "" -#: ../../source/contributor-how-to-write-documentation.rst:6 +#: ../../source/contributor-how-to-write-documentation.rst:5 msgid "Project layout" msgstr "" -#: ../../source/contributor-how-to-write-documentation.rst:8 +#: ../../source/contributor-how-to-write-documentation.rst:7 msgid "" "The Flower documentation lives in the ``doc`` directory. 
The Sphinx-based" " documentation system supports both reStructuredText (``.rst`` files) and" @@ -1215,7 +1214,7 @@ msgid "" msgstr "" #: ../../source/contributor-how-to-write-documentation.rst:10 -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:169 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:193 msgid "" "Note that, in order to build the documentation locally (with ``poetry run" " make html``, like described below), `Pandoc " @@ -1223,20 +1222,20 @@ msgid "" "system." msgstr "" -#: ../../source/contributor-how-to-write-documentation.rst:14 +#: ../../source/contributor-how-to-write-documentation.rst:15 msgid "Edit an existing page" msgstr "" -#: ../../source/contributor-how-to-write-documentation.rst:16 +#: ../../source/contributor-how-to-write-documentation.rst:17 msgid "Edit an existing ``.rst`` (or ``.md``) file under ``doc/source/``" msgstr "" -#: ../../source/contributor-how-to-write-documentation.rst:17 +#: ../../source/contributor-how-to-write-documentation.rst:18 #: ../../source/contributor-how-to-write-documentation.rst:27 msgid "Compile the docs: ``cd doc``, then ``poetry run make html``" msgstr "" -#: ../../source/contributor-how-to-write-documentation.rst:18 +#: ../../source/contributor-how-to-write-documentation.rst:19 #: ../../source/contributor-how-to-write-documentation.rst:28 msgid "Open ``doc/build/html/index.html`` in the browser to check the result" msgstr "" @@ -1269,41 +1268,41 @@ msgid "" "the Flower codebase." msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:11 +#: ../../source/contributor-ref-good-first-contributions.rst:9 msgid "Where to start" msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:13 +#: ../../source/contributor-ref-good-first-contributions.rst:11 msgid "" "Until the Flower core library matures it will be easier to get PR's " "accepted if they only touch non-core areas of the codebase. 
Good " "candidates to get started are:" msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:17 +#: ../../source/contributor-ref-good-first-contributions.rst:14 msgid "Documentation: What's missing? What could be expressed more clearly?" msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:18 +#: ../../source/contributor-ref-good-first-contributions.rst:15 msgid "Baselines: See below." msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:19 +#: ../../source/contributor-ref-good-first-contributions.rst:16 msgid "Examples: See below." msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:23 +#: ../../source/contributor-ref-good-first-contributions.rst:19 msgid "Request for Flower Baselines" msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:25 +#: ../../source/contributor-ref-good-first-contributions.rst:21 msgid "" "If you are not familiar with Flower Baselines, you should probably check-" "out our `contributing guide for baselines " "`_." msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:27 +#: ../../source/contributor-ref-good-first-contributions.rst:25 msgid "" "You should then check out the open `issues " "`_" @@ -1312,7 +1311,7 @@ msgid "" "working on it!" msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:31 +#: ../../source/contributor-ref-good-first-contributions.rst:30 msgid "" "Otherwise, if you don't find a baseline you'd like to work on, be sure to" " open a new issue with the baseline request template!" @@ -1353,30 +1352,30 @@ msgid "" "special case of the SecAgg+ protocol." 
msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:8 -msgid "The :code:`SecAgg+` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:9 +msgid "The ``SecAgg+`` abstraction" msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:10 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:11 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:163 msgid "" "In this implementation, each client will be assigned with a unique index " "(int) for secure aggregation, and thus many python dictionaries used have" " keys of int type rather than ClientProxy type." msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:65 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:198 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:67 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:204 msgid "" "The Flower server will execute and process received results in the " "following order:" msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:159 -msgid "The :code:`LightSecAgg` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +msgid "The ``LightSecAgg`` abstraction" msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:271 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:277 msgid "Types" msgstr "" @@ -1390,22 +1389,22 @@ msgid "" "are not used to contributing to GitHub projects." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:6 +#: ../../source/contributor-tutorial-contribute-on-github.rst:7 msgid "" "If you're familiar with how contributing on GitHub works, you can " "directly checkout our :doc:`getting started guide for contributors " "`." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:10 +#: ../../source/contributor-tutorial-contribute-on-github.rst:12 msgid "Setting up the repository" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:21 +#: ../../source/contributor-tutorial-contribute-on-github.rst:29 msgid "**Create a GitHub account and setup Git**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:13 +#: ../../source/contributor-tutorial-contribute-on-github.rst:15 msgid "" "Git is a distributed version control tool. This allows for an entire " "codebase's history to be stored and every developer's machine. It is a " @@ -1414,20 +1413,20 @@ msgid "" "started-with-git/set-up-git>`_ to set it up." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:16 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "" "GitHub, itself, is a code hosting platform for version control and " "collaboration. It allows for everyone to collaborate and work from " "anywhere on remote repositories." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:18 +#: ../../source/contributor-tutorial-contribute-on-github.rst:25 msgid "" "If you haven't already, you will need to create an account on `GitHub " "`_." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:20 +#: ../../source/contributor-tutorial-contribute-on-github.rst:28 msgid "" "The idea behind the generic Git and GitHub workflow boils down to this: " "you download code from a remote repository on GitHub, make changes " @@ -1435,19 +1434,19 @@ msgid "" "history back to GitHub." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:32 +#: ../../source/contributor-tutorial-contribute-on-github.rst:42 msgid "**Forking the Flower repository**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:24 +#: ../../source/contributor-tutorial-contribute-on-github.rst:32 msgid "" "A fork is a personal copy of a GitHub repository. To create one for " -"Flower, you must navigate to ``_ (while " +"Flower, you must navigate to https://github.com/adap/flower (while " "connected to your GitHub account) and click the ``Fork`` button situated " "on the top right of the page." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:29 +#: ../../source/contributor-tutorial-contribute-on-github.rst:38 msgid "" "You can change the name if you want, but this is not necessary as this " "version of Flower will be yours and will sit inside your own account " @@ -1455,11 +1454,11 @@ msgid "" " the top left corner that you are looking at your own version of Flower." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 msgid "**Cloning your forked repository**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:35 +#: ../../source/contributor-tutorial-contribute-on-github.rst:45 msgid "" "The next step is to download the forked repository on your machine to be " "able to make changes to it. On your forked repository page, you should " @@ -1467,27 +1466,27 @@ msgid "" "ability to copy the HTTPS link of the repository." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:41 +#: ../../source/contributor-tutorial-contribute-on-github.rst:52 msgid "" "Once you copied the \\, you can open a terminal on your machine, " "navigate to the place you want to download the repository to and type:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 msgid "" "This will create a ``flower/`` (or the name of your fork if you renamed " "it) folder in the current working directory." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:66 +#: ../../source/contributor-tutorial-contribute-on-github.rst:78 msgid "**Add origin**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:50 +#: ../../source/contributor-tutorial-contribute-on-github.rst:62 msgid "You can then go into the repository folder:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:56 +#: ../../source/contributor-tutorial-contribute-on-github.rst:68 msgid "" "And here we will need to add an origin to our repository. The origin is " "the \\ of the remote fork repository. To obtain it, we can do as " @@ -1495,27 +1494,27 @@ msgid "" "account and copying the link." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:61 +#: ../../source/contributor-tutorial-contribute-on-github.rst:75 msgid "" "Once the \\ is copied, we can type the following command in our " "terminal:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:90 +#: ../../source/contributor-tutorial-contribute-on-github.rst:102 msgid "**Add upstream**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:69 +#: ../../source/contributor-tutorial-contribute-on-github.rst:81 msgid "" "Now we will add an upstream address to our repository. 
Still in the same " "directory, we must run the following command:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:76 +#: ../../source/contributor-tutorial-contribute-on-github.rst:88 msgid "The following diagram visually explains what we did in the previous steps:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:80 +#: ../../source/contributor-tutorial-contribute-on-github.rst:92 msgid "" "The upstream is the GitHub remote address of the parent repository (in " "this case Flower), i.e. the one we eventually want to contribute to and " @@ -1524,17 +1523,17 @@ msgid "" "in our own account." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:84 +#: ../../source/contributor-tutorial-contribute-on-github.rst:97 msgid "" "To make sure our local version of the fork is up-to-date with the latest " "changes from the Flower repository, we can execute the following command:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:93 +#: ../../source/contributor-tutorial-contribute-on-github.rst:105 msgid "Setting up the coding environment" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:95 +#: ../../source/contributor-tutorial-contribute-on-github.rst:107 msgid "" "This can be achieved by following this :doc:`getting started guide for " "contributors ` (note " @@ -1542,158 +1541,158 @@ msgid "" "code and test it, you can finally start making changes!" 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:100 +#: ../../source/contributor-tutorial-contribute-on-github.rst:113 msgid "Making changes" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:102 +#: ../../source/contributor-tutorial-contribute-on-github.rst:115 msgid "" "Before making any changes make sure you are up-to-date with your " "repository:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:108 +#: ../../source/contributor-tutorial-contribute-on-github.rst:121 msgid "And with Flower's repository:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:122 +#: ../../source/contributor-tutorial-contribute-on-github.rst:134 msgid "**Create a new branch**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:115 +#: ../../source/contributor-tutorial-contribute-on-github.rst:128 msgid "" "To make the history cleaner and easier to work with, it is good practice " "to create a new branch for each feature/project that needs to be " "implemented." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:118 +#: ../../source/contributor-tutorial-contribute-on-github.rst:131 msgid "" "To do so, just run the following command inside the repository's " "directory:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:136 msgid "**Make changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:137 msgid "Write great code and create wonderful changes using your favorite editor!" 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:138 +#: ../../source/contributor-tutorial-contribute-on-github.rst:149 msgid "**Test and format your code**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:128 +#: ../../source/contributor-tutorial-contribute-on-github.rst:139 msgid "" "Don't forget to test and format your code! Otherwise your code won't be " "able to be merged into the Flower repository. This is done so the " "codebase stays consistent and easy to understand." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:131 +#: ../../source/contributor-tutorial-contribute-on-github.rst:143 msgid "To do so, we have written a few scripts that you can execute:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:150 +#: ../../source/contributor-tutorial-contribute-on-github.rst:162 msgid "**Stage changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:141 +#: ../../source/contributor-tutorial-contribute-on-github.rst:152 msgid "" "Before creating a commit that will update your history, you must specify " "to Git which files it needs to take into account." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:143 +#: ../../source/contributor-tutorial-contribute-on-github.rst:155 msgid "This can be done with:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:149 +#: ../../source/contributor-tutorial-contribute-on-github.rst:161 msgid "" "To check which files have been modified compared to the last version " "(last commit) and to see which files are staged for commit, you can use " -"the :code:`git status` command." +"the ``git status`` command." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:160 +#: ../../source/contributor-tutorial-contribute-on-github.rst:173 msgid "**Commit changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:153 +#: ../../source/contributor-tutorial-contribute-on-github.rst:165 msgid "" -"Once you have added all the files you wanted to commit using :code:`git " -"add`, you can finally create your commit using this command:" +"Once you have added all the files you wanted to commit using ``git add``," +" you can finally create your commit using this command:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:159 +#: ../../source/contributor-tutorial-contribute-on-github.rst:172 msgid "" "The \\ is there to explain to others what the commit " "does. It should be written in an imperative style and be concise. An " -"example would be :code:`git commit -m \"Add images to README\"`." +"example would be ``git commit -m \"Add images to README\"``." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:171 +#: ../../source/contributor-tutorial-contribute-on-github.rst:185 msgid "**Push the changes to the fork**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:163 +#: ../../source/contributor-tutorial-contribute-on-github.rst:176 msgid "" "Once we have committed our changes, we have effectively updated our local" " history, but GitHub has no way of knowing this unless we push our " "changes to our origin's remote address:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:170 +#: ../../source/contributor-tutorial-contribute-on-github.rst:184 msgid "" "Once this is done, you will see on the GitHub that your forked repo was " "updated with the changes you have made." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:174 +#: ../../source/contributor-tutorial-contribute-on-github.rst:188 msgid "Creating and merging a pull request (PR)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +#: ../../source/contributor-tutorial-contribute-on-github.rst:226 msgid "**Create the PR**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:177 +#: ../../source/contributor-tutorial-contribute-on-github.rst:191 msgid "" "Once you have pushed changes, on the GitHub webpage of your repository " "you should see the following message:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:181 +#: ../../source/contributor-tutorial-contribute-on-github.rst:196 msgid "Otherwise you can always find this option in the ``Branches`` page." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:183 +#: ../../source/contributor-tutorial-contribute-on-github.rst:198 msgid "" "Once you click the ``Compare & pull request`` button, you should see " "something similar to this:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:187 +#: ../../source/contributor-tutorial-contribute-on-github.rst:203 msgid "At the top you have an explanation of which branch will be merged where:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:191 +#: ../../source/contributor-tutorial-contribute-on-github.rst:207 msgid "" "In this example you can see that the request is to merge the branch " "``doc-fixes`` from my forked repository to branch ``main`` from the " "Flower repository." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:193 +#: ../../source/contributor-tutorial-contribute-on-github.rst:210 msgid "" "The title should be changed to adhere to the :ref:`pr_title_format` " "guidelines, otherwise it won't be possible to merge the PR. 
So in this " "case, a correct title might be ``docs(framework:skip) Fix typos``." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:196 +#: ../../source/contributor-tutorial-contribute-on-github.rst:214 msgid "" "The input box in the middle is there for you to describe what your PR " "does and to link it to existing issues. We have placed comments (that " @@ -1701,167 +1700,167 @@ msgid "" "process." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:199 +#: ../../source/contributor-tutorial-contribute-on-github.rst:218 msgid "It is important to follow the instructions described in comments." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:201 +#: ../../source/contributor-tutorial-contribute-on-github.rst:220 msgid "" "At the bottom you will find the button to open the PR. This will notify " "reviewers that a new PR has been opened and that they should look over it" " to merge or to request changes." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:204 +#: ../../source/contributor-tutorial-contribute-on-github.rst:224 msgid "" "If your PR is not yet ready for review, and you don't want to notify " "anyone, you have the option to create a draft pull request:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:230 msgid "**Making new changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:229 msgid "" "Once the PR has been opened (as draft or not), you can still push new " "commits to it the same way we did before, by making changes to the branch" " associated with the PR." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:231 +#: ../../source/contributor-tutorial-contribute-on-github.rst:253 msgid "**Review the PR**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:212 +#: ../../source/contributor-tutorial-contribute-on-github.rst:233 msgid "" "Once the PR has been opened or once the draft PR has been marked as " "ready, a review from code owners will be automatically requested:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:216 +#: ../../source/contributor-tutorial-contribute-on-github.rst:238 msgid "" "Code owners will then look into the code, ask questions, request changes " "or validate the PR." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:218 +#: ../../source/contributor-tutorial-contribute-on-github.rst:241 msgid "Merging will be blocked if there are ongoing requested changes." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:222 +#: ../../source/contributor-tutorial-contribute-on-github.rst:245 msgid "" "To resolve them, just push the necessary changes to the branch associated" " with the PR:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:226 +#: ../../source/contributor-tutorial-contribute-on-github.rst:250 msgid "And resolve the conversation:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:230 +#: ../../source/contributor-tutorial-contribute-on-github.rst:254 msgid "" "Once all the conversations have been resolved, you can re-request a " "review." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:251 +#: ../../source/contributor-tutorial-contribute-on-github.rst:274 msgid "**Once the PR is merged**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:234 +#: ../../source/contributor-tutorial-contribute-on-github.rst:256 msgid "" "If all the automatic tests have passed and reviewers have no more changes" " to request, they can approve the PR and merge it." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:238 +#: ../../source/contributor-tutorial-contribute-on-github.rst:261 msgid "" "Once it is merged, you can delete the branch on GitHub (a button should " "appear to do so) and also delete it locally by doing:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:245 +#: ../../source/contributor-tutorial-contribute-on-github.rst:269 msgid "Then you should update your forked repository by doing:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:254 +#: ../../source/contributor-tutorial-contribute-on-github.rst:277 msgid "Example of first contribution" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:257 +#: ../../source/contributor-tutorial-contribute-on-github.rst:280 msgid "Problem" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:259 +#: ../../source/contributor-tutorial-contribute-on-github.rst:282 msgid "" "For our documentation, we've started to use the `Diàtaxis framework " "`_." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:261 +#: ../../source/contributor-tutorial-contribute-on-github.rst:285 msgid "" "Our \"How to\" guides should have titles that continue the sentence \"How" " to …\", for example, \"How to upgrade to Flower 1.0\"." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:263 +#: ../../source/contributor-tutorial-contribute-on-github.rst:288 msgid "" "Most of our guides do not follow this new format yet, and changing their " "title is (unfortunately) more involved than one might think." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:265 +#: ../../source/contributor-tutorial-contribute-on-github.rst:291 msgid "" "This issue is about changing the title of a doc from present continuous " "to present simple." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:267 +#: ../../source/contributor-tutorial-contribute-on-github.rst:294 msgid "" "Let's take the example of \"Saving Progress\" which we changed to \"Save " "Progress\". Does this pass our check?" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:269 +#: ../../source/contributor-tutorial-contribute-on-github.rst:297 msgid "Before: \"How to saving progress\" ❌" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:271 +#: ../../source/contributor-tutorial-contribute-on-github.rst:299 msgid "After: \"How to save progress\" ✅" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:274 +#: ../../source/contributor-tutorial-contribute-on-github.rst:302 msgid "Solution" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:276 +#: ../../source/contributor-tutorial-contribute-on-github.rst:304 msgid "" "This is a tiny change, but it'll allow us to test your end-to-end setup. 
" "After cloning and setting up the Flower repo, here's what you should do:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:278 +#: ../../source/contributor-tutorial-contribute-on-github.rst:307 msgid "Find the source file in ``doc/source``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#: ../../source/contributor-tutorial-contribute-on-github.rst:308 msgid "" "Make the change in the ``.rst`` file (beware, the dashes under the title " "should be the same length as the title itself)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:280 +#: ../../source/contributor-tutorial-contribute-on-github.rst:310 msgid "" "Build the docs and `check the result `_" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:283 +#: ../../source/contributor-tutorial-contribute-on-github.rst:314 msgid "Rename file" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:285 +#: ../../source/contributor-tutorial-contribute-on-github.rst:316 msgid "" "You might have noticed that the file name still reflects the old wording." " If we just change the file, then we break all existing links to it - it " @@ -1869,68 +1868,68 @@ msgid "" "engine ranking." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:288 +#: ../../source/contributor-tutorial-contribute-on-github.rst:320 msgid "Here's how to change the file name:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +#: ../../source/contributor-tutorial-contribute-on-github.rst:322 msgid "Change the file name to ``save-progress.rst``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:291 +#: ../../source/contributor-tutorial-contribute-on-github.rst:323 msgid "Add a redirect rule to ``doc/source/conf.py``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:293 +#: ../../source/contributor-tutorial-contribute-on-github.rst:325 msgid "" "This will cause a redirect from ``saving-progress.html`` to ``save-" "progress.html``, old links will continue to work." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:296 +#: ../../source/contributor-tutorial-contribute-on-github.rst:329 msgid "Apply changes in the index file" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:298 +#: ../../source/contributor-tutorial-contribute-on-github.rst:331 msgid "" "For the lateral navigation bar to work properly, it is very important to " "update the ``index.rst`` file as well. This is where we define the whole " "arborescence of the navbar." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:301 +#: ../../source/contributor-tutorial-contribute-on-github.rst:335 msgid "Find and modify the file name in ``index.rst``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:304 +#: ../../source/contributor-tutorial-contribute-on-github.rst:338 msgid "Open PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:306 +#: ../../source/contributor-tutorial-contribute-on-github.rst:340 msgid "" "Commit the changes (commit messages are always imperative: \"Do " "something\", in this case \"Change …\")" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:307 +#: ../../source/contributor-tutorial-contribute-on-github.rst:342 msgid "Push the changes to your fork" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:308 +#: ../../source/contributor-tutorial-contribute-on-github.rst:343 msgid "" "Open a PR (as shown above) with title ``docs(framework) Update how-to " "guide title``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:309 +#: ../../source/contributor-tutorial-contribute-on-github.rst:344 msgid "Wait for it to be approved!" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:310 +#: ../../source/contributor-tutorial-contribute-on-github.rst:345 msgid "Congrats! 🥳 You're now officially a Flower contributor!" 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:314 +#: ../../source/contributor-tutorial-contribute-on-github.rst:348 #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:573 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1012 #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:811 @@ -1939,39 +1938,39 @@ msgstr "" msgid "Next steps" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:316 +#: ../../source/contributor-tutorial-contribute-on-github.rst:350 msgid "" "Once you have made your first PR, and want to contribute more, be sure to" " check out the following :" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:318 +#: ../../source/contributor-tutorial-contribute-on-github.rst:353 msgid "" ":doc:`Good first contributions `, where you should particularly look into the " -":code:`baselines` contributions." +"``baselines`` contributions." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:322 +#: ../../source/contributor-tutorial-contribute-on-github.rst:357 #: ../../source/fed/0000-20200102-fed-template.md:60 msgid "Appendix" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:327 +#: ../../source/contributor-tutorial-contribute-on-github.rst:362 msgid "PR title format" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:329 +#: ../../source/contributor-tutorial-contribute-on-github.rst:364 msgid "We enforce the following PR title format:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:335 +#: ../../source/contributor-tutorial-contribute-on-github.rst:370 msgid "" "(or ``(:skip) `` to ignore the PR in the " "changelog)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:337 +#: ../../source/contributor-tutorial-contribute-on-github.rst:372 msgid "" "Where ```` needs to be in ``{ci, fix, feat, docs, refactor, " "break}``, 
```` should be in ``{framework, baselines, datasets, " @@ -1980,50 +1979,50 @@ msgid "" "verb in the imperative mood." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:341 +#: ../../source/contributor-tutorial-contribute-on-github.rst:377 #, fuzzy msgid "Valid examples:" msgstr "Exemplo" -#: ../../source/contributor-tutorial-contribute-on-github.rst:343 +#: ../../source/contributor-tutorial-contribute-on-github.rst:379 msgid "``feat(framework) Add flwr build CLI command``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:344 +#: ../../source/contributor-tutorial-contribute-on-github.rst:380 msgid "``refactor(examples:skip) Improve quickstart-pytorch logging``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:345 +#: ../../source/contributor-tutorial-contribute-on-github.rst:381 msgid "``ci(*:skip) Enforce PR title format``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:347 +#: ../../source/contributor-tutorial-contribute-on-github.rst:383 msgid "Invalid examples:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:349 +#: ../../source/contributor-tutorial-contribute-on-github.rst:385 msgid "``feat(framework): Add flwr build CLI command`` (extra ``:``)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:350 +#: ../../source/contributor-tutorial-contribute-on-github.rst:386 msgid "" "``feat(*) Add flwr build CLI command`` (missing ``skip`` flag along with " "``*``)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:351 +#: ../../source/contributor-tutorial-contribute-on-github.rst:387 msgid "``feat(skip) Add flwr build CLI command`` (missing ````)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:352 +#: ../../source/contributor-tutorial-contribute-on-github.rst:388 msgid "``feat(framework) add flwr build CLI command`` (non capitalised verb)" msgstr "" -#: 
../../source/contributor-tutorial-contribute-on-github.rst:353 +#: ../../source/contributor-tutorial-contribute-on-github.rst:389 msgid "``feat(framework) Add flwr build CLI command.`` (dot at the end)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:354 +#: ../../source/contributor-tutorial-contribute-on-github.rst:390 msgid "``Add flwr build CLI command.`` (missing ``()``)" msgstr "" @@ -2033,8 +2032,9 @@ msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 #: ../../source/docker/run-as-subprocess.rst:11 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:15 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:12 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:16 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:13 #: ../../source/docker/tutorial-quickstart-docker.rst:11 msgid "Prerequisites" msgstr "" @@ -2057,16 +2057,16 @@ msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:12 msgid "" -"Flower uses :code:`pyproject.toml` to manage dependencies and configure " +"Flower uses ``pyproject.toml`` to manage dependencies and configure " "development tools (the ones which support it). Poetry is a build tool " "which supports `PEP 517 `_." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:18 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:17 msgid "Developer Machine Setup" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:21 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:20 msgid "Preliminaries" msgstr "" @@ -2084,94 +2084,93 @@ msgid "" "installation actions to add `brew` to your PATH." 
msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:28 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:29 msgid "" "Install `xz` (to install different Python versions) and `pandoc` to build" -" the docs::" +" the docs:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:34 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:36 msgid "For Ubuntu" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:35 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:38 msgid "" "Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all " -"necessary packages::" +"necessary packages:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:44 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:47 msgid "Create Flower Dev Environment" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:46 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:49 msgid "" -"1. Clone the `Flower repository `_ from " -"GitHub::" +"Clone the `Flower repository `_ from " +"GitHub:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:52 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:56 msgid "" "Let's create the Python environment for all-things Flower. If you wish to" -" use :code:`pyenv`, we provide two convenience scripts that you can use. " -"If you prefer using something else than :code:`pyenv`, create a new " -"environment, activate and skip to the last point where all packages are " -"installed." +" use ``pyenv``, we provide two convenience scripts that you can use. If " +"you prefer using something else than ``pyenv``, create a new environment," +" activate and skip to the last point where all packages are installed." 
msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:54 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:61 msgid "" -"If you don't have :code:`pyenv` installed, the following script that will" -" install it, set it up, and create the virtual environment (with " -":code:`Python 3.9.20` by default)::" +"If you don't have ``pyenv`` installed, the following script that will " +"install it, set it up, and create the virtual environment (with ``Python " +"3.9.20`` by default):" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:58 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:68 msgid "" -"If you already have :code:`pyenv` installed (along with the :code:`pyenv-" -"virtualenv` plugin), you can use the following convenience script (with " -":code:`Python 3.9.20` by default)::" +"If you already have ``pyenv`` installed (along with the ``pyenv-" +"virtualenv`` plugin), you can use the following convenience script (with " +"``Python 3.9.20`` by default):" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:62 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:75 msgid "" -"3. Install the Flower package in development mode (think :code:`pip " -"install -e`) along with all necessary dependencies::" +"3. Install the Flower package in development mode (think ``pip install " +"-e``) along with all necessary dependencies:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:83 msgid "Convenience Scripts" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:71 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 msgid "" "The Flower repository contains a number of convenience scripts to make " -"recurring development tasks easier and less error-prone. 
See the " -":code:`/dev` subdirectory for a full list. The following scripts are " -"amongst the most important ones:" +"recurring development tasks easier and less error-prone. See the ``/dev``" +" subdirectory for a full list. The following scripts are amongst the most" +" important ones:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:90 msgid "Create/Delete Virtual Environment" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:98 msgid "Compile ProtoBuf Definitions" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:105 msgid "Auto-Format Code" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:99 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:112 msgid "Run Linters and Tests" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:106 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119 msgid "Add a pre-commit hook" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:121 msgid "" "Developers may integrate a pre-commit hook into their workflow utilizing " "the `pre-commit `_ library. The pre-" @@ -2179,85 +2178,85 @@ msgid "" "``./dev/format.sh`` and ``./dev/test.sh`` scripts." 
msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:110 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:125 msgid "There are multiple ways developers can use this:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:112 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:127 msgid "Install the pre-commit hook to your local git directory by simply running:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:118 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:133 msgid "" "Each ``git commit`` will trigger the execution of formatting and " "linting/test scripts." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:135 msgid "" "If in a hurry, bypass the hook using ``--no-verify`` with the ``git " -"commit`` command. ::" +"commit`` command." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142 msgid "" "For developers who prefer not to install the hook permanently, it is " "possible to execute a one-time check prior to committing changes by using" " the following command:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:149 msgid "" "This executes the formatting and linting checks/tests on all the files " "without modifying the default behavior of ``git commit``." 
msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:133 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:153 msgid "Run Github Actions (CI) locally" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:135 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:155 msgid "" "Developers could run the full set of Github Actions workflows under their" " local environment by using `Act `_. " "Please refer to the installation instructions under the linked repository" -" and run the next command under Flower main cloned repository folder::" +" and run the next command under Flower main cloned repository folder:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:164 msgid "" "The Flower default workflow would run by setting up the required Docker " "machines underneath." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:147 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:168 msgid "Build Release" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:149 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:170 msgid "" "Flower uses Poetry to build releases. The necessary command is wrapped in" -" a simple script::" +" a simple script:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:154 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:177 msgid "" -"The resulting :code:`.whl` and :code:`.tar.gz` releases will be stored in" -" the :code:`/dist` subdirectory." +"The resulting ``.whl`` and ``.tar.gz`` releases will be stored in the " +"``/dist`` subdirectory." 
msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:159 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:181 msgid "Build Documentation" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:161 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:183 msgid "" "Flower's documentation uses `Sphinx `_. " "There's no convenience script to re-build the documentation yet, but it's" -" pretty easy::" +" pretty easy:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:191 msgid "This will generate HTML documentation in ``doc/build/html``." msgstr "" @@ -2300,7 +2299,7 @@ msgid "" msgstr "" #: ../../source/docker/enable-tls.rst:23 -#: ../../source/docker/persist-superlink-state.rst:14 +#: ../../source/docker/persist-superlink-state.rst:15 msgid "" "If you later want to delete the directory, you can change the user ID " "back to the current user ID by running ``sudo chown -R $USER:$(id -gn) " @@ -2322,21 +2321,21 @@ msgstr "" msgid "Understanding the command" msgstr "" -#: ../../source/docker/enable-tls.rst:44 ../../source/docker/enable-tls.rst:91 +#: ../../source/docker/enable-tls.rst:45 ../../source/docker/enable-tls.rst:92 #: ../../source/docker/enable-tls.rst:125 #: ../../source/docker/tutorial-quickstart-docker.rst:66 #: ../../source/docker/tutorial-quickstart-docker.rst:103 -#: ../../source/docker/tutorial-quickstart-docker.rst:213 -#: ../../source/docker/tutorial-quickstart-docker.rst:300 +#: ../../source/docker/tutorial-quickstart-docker.rst:217 +#: ../../source/docker/tutorial-quickstart-docker.rst:305 msgid "``docker run``: This tells Docker to run a container from an image." 
msgstr "" -#: ../../source/docker/enable-tls.rst:45 ../../source/docker/enable-tls.rst:92 +#: ../../source/docker/enable-tls.rst:46 ../../source/docker/enable-tls.rst:93 #: ../../source/docker/enable-tls.rst:126 #: ../../source/docker/tutorial-quickstart-docker.rst:67 #: ../../source/docker/tutorial-quickstart-docker.rst:104 -#: ../../source/docker/tutorial-quickstart-docker.rst:214 -#: ../../source/docker/tutorial-quickstart-docker.rst:301 +#: ../../source/docker/tutorial-quickstart-docker.rst:218 +#: ../../source/docker/tutorial-quickstart-docker.rst:306 msgid "``--rm``: Remove the container once it is stopped or the command exits." msgstr "" @@ -2439,18 +2438,18 @@ msgstr "" msgid "the network." msgstr "" -#: ../../source/docker/enable-tls.rst:71 +#: ../../source/docker/enable-tls.rst:72 msgid "SuperNode" msgstr "" -#: ../../source/docker/enable-tls.rst:73 +#: ../../source/docker/enable-tls.rst:74 msgid "" "Assuming that the ``ca.crt`` certificate already exists locally, we can " "use the flag ``--volume`` to mount the local certificate into the " "container's ``/app/`` directory." msgstr "" -#: ../../source/docker/enable-tls.rst:78 +#: ../../source/docker/enable-tls.rst:79 msgid "" "If you're generating self-signed certificates and the ``ca.crt`` " "certificate doesn't exist on the SuperNode, you can copy it over after " @@ -2563,15 +2562,15 @@ msgstr "" msgid "Getting Started" msgstr "" -#: ../../source/docker/index.rst:20 +#: ../../source/docker/index.rst:19 msgid "Running in Production" msgstr "" -#: ../../source/docker/index.rst:29 +#: ../../source/docker/index.rst:28 msgid "Advanced Options" msgstr "" -#: ../../source/docker/index.rst:41 +#: ../../source/docker/index.rst:40 msgid "Run Flower using Docker Compose" msgstr "" @@ -2593,7 +2592,7 @@ msgid "" " on your host system and a name for the database file." 
msgstr "" -#: ../../source/docker/persist-superlink-state.rst:10 +#: ../../source/docker/persist-superlink-state.rst:11 msgid "" "By default, the SuperLink container runs with a non-root user called " "``app`` with the user ID ``49999``. It is recommended to create a new " @@ -2601,7 +2600,7 @@ msgid "" "the mounted directory has the proper permissions." msgstr "" -#: ../../source/docker/persist-superlink-state.rst:20 +#: ../../source/docker/persist-superlink-state.rst:21 msgid "" "In the example below, we create a new directory called ``state``, change " "the user ID and tell Docker via the flag ``--volume`` to mount the local " @@ -2610,7 +2609,7 @@ msgid "" "database file." msgstr "" -#: ../../source/docker/persist-superlink-state.rst:35 +#: ../../source/docker/persist-superlink-state.rst:36 msgid "" "As soon as the SuperLink starts, the file ``state.db`` is created in the " "``state`` directory on your host system. If the file already exists, the " @@ -2635,17 +2634,17 @@ msgid "" "by-digest-immutable-identifier>`_ of the image instead of the tag." msgstr "" -#: ../../source/docker/pin-version.rst:13 +#: ../../source/docker/pin-version.rst:14 msgid "" "The following command returns the current image digest referenced by the " ":substitution-code:`superlink:|stable_flwr_version|` tag:" msgstr "" -#: ../../source/docker/pin-version.rst:22 +#: ../../source/docker/pin-version.rst:23 msgid "This will output" msgstr "" -#: ../../source/docker/pin-version.rst:29 +#: ../../source/docker/pin-version.rst:30 msgid "Next, we can pin the digest when running a new SuperLink container:" msgstr "" @@ -2692,7 +2691,7 @@ msgid "" "``USER root`` directive within your Dockerfile." 
msgstr "" -#: ../../source/docker/run-as-root-user.rst:29 +#: ../../source/docker/run-as-root-user.rst:30 #, fuzzy msgid "SuperNode Dockerfile" msgstr "Construindo a imagem do servidor" @@ -2718,11 +2717,11 @@ msgid "" "done by extending the SuperNode image:" msgstr "" -#: ../../source/docker/run-as-subprocess.rst:16 +#: ../../source/docker/run-as-subprocess.rst:17 msgid "Dockerfile.supernode" msgstr "" -#: ../../source/docker/run-as-subprocess.rst:30 +#: ../../source/docker/run-as-subprocess.rst:31 msgid "" "Next, build the SuperNode Docker image by running the following command " "in the directory where Dockerfile is located:" @@ -2752,77 +2751,78 @@ msgid "" " Engine via Docker Compose." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:12 msgid "" "Some quickstart examples may have limitations or requirements that " "prevent them from running on every environment. For more information, " -"please see `Limitations`_." +"please see Limitations_." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:17 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:14 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:15 #: ../../source/docker/tutorial-quickstart-docker.rst:13 msgid "Before you start, make sure that:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:19 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:16 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 #: ../../source/docker/tutorial-quickstart-docker.rst:15 msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 #: ../../source/docker/tutorial-quickstart-docker.rst:16 #, fuzzy msgid "The Docker daemon is running." msgstr "Verifique que o serviço Docker está rodando." -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:19 msgid "Docker Compose is `installed `_." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:24 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:25 msgid "Run the Quickstart Example" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:26 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:27 msgid "" "Clone the quickstart example you like to run. 
For example, ``quickstart-" "pytorch``:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:34 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:35 msgid "" "Download the `compose.yml " "`_" " file into the example directory:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:41 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:44 msgid "Build and start the services using the following command:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:47 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:50 msgid "" "Append the following lines to the end of the ``pyproject.toml`` file and " "save it:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:49 -#: ../../source/docker/tutorial-quickstart-docker.rst:319 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:52 +#: ../../source/docker/tutorial-quickstart-docker.rst:324 msgid "pyproject.toml" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:58 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 msgid "" "You can customize the string that follows ``tool.flwr.federations.`` to " "fit your needs. However, please note that the string cannot contain a dot" " (``.``)." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:64 msgid "" "In this example, ``local-deployment`` has been used. Just remember to " "replace ``local-deployment`` with your chosen name in both the " @@ -2830,54 +2830,54 @@ msgid "" "command." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:65 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:68 #, fuzzy msgid "Run the example:" msgstr "Exemplo" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:71 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:74 msgid "Follow the logs of the SuperExec service:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:77 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 msgid "" "That is all it takes! You can monitor the progress of the run through the" " logs of the SuperExec." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:84 msgid "Run a Different Quickstart Example" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:82 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:86 msgid "" "To run a different quickstart example, such as ``quickstart-tensorflow``," " first, shut down the Docker Compose services of the current example:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:89 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:93 msgid "After that, you can repeat the steps above." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:92 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:98 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:96 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 msgid "Limitations" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:97 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 msgid "Quickstart Example" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:99 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 msgid "quickstart-fastai" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:100 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:123 #: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 #: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 #: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 @@ -2885,71 +2885,71 @@ msgstr "" msgid "None" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 msgid "quickstart-huggingface" msgstr "" -#: 
../../source/docker/run-quickstart-examples-docker-compose.rst:103 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 msgid "quickstart-jax" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:125 msgid "" "The example has not yet been updated to work with the latest ``flwr`` " "version." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 msgid "quickstart-mlcube" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 msgid "quickstart-mlx" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 msgid "" "`Requires to run on macOS with Apple Silicon `_." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 msgid "quickstart-monai" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 msgid "quickstart-pandas" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:113 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 msgid "quickstart-pytorch-lightning" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 msgid "" "Requires an older pip version that is not supported by the Flower Docker " "images." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 msgid "quickstart-pytorch" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 msgid "quickstart-sklearn-tabular" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:124 msgid "quickstart-tabnet" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:126 msgid "quickstart-tensorflow" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:127 msgid "Only runs on AMD64." msgstr "" @@ -2964,6 +2964,203 @@ msgid "" "environment variables for a container." 
msgstr "" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:2 +msgid "Deploy Flower on Multiple Machines with Docker Compose" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:4 +msgid "" +"This guide will help you set up a Flower project on multiple machines " +"using Docker Compose." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:7 +msgid "" +"You will learn how to run the Flower client and server components on two " +"separate machines, with Flower configured to use TLS encryption and " +"persist SuperLink state across restarts. A server consists of a SuperLink" +" and ``SuperExec``. For more details about the Flower architecture, refer" +" to the :doc:`../explanation-flower-architecture` explainer page." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:13 +msgid "" +"This guide assumes you have completed the :doc:`tutorial-quickstart-" +"docker-compose` tutorial. It is highly recommended that you follow and " +"understand the contents of that tutorial before proceeding with this " +"guide." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:20 +msgid "Before you begin, make sure you have the following prerequisites:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:23 +msgid "The Docker daemon is running on your local machine and the remote machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:24 +msgid "" +"Docker Compose V2 is installed on both your local machine and the remote " +"machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:25 +msgid "You can connect to the remote machine from your local machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:26 +msgid "Ports ``9091`` and ``9093`` are accessible on the remote machine." 
+msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:30 +msgid "" +"The guide uses the |quickstart_sklearn_tabular|_ example as an example " +"project." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:32 +msgid "" +"If your project has a different name or location, please remember to " +"adjust the commands/paths accordingly." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:36 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker.rst:19 +msgid "Step 1: Set Up" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:38 +msgid "Clone the Flower repository and change to the ``distributed`` directory:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:45 +msgid "Get the IP address from the remote machine and save it for later." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:46 +msgid "" +"Use the ``certs.yml`` Compose file to generate your own self-signed " +"certificates. If you have certificates, you can continue with Step 2." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:51 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:221 +msgid "These certificates should be used only for development purposes." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:53 +msgid "" +"For production environments, you may have to use dedicated services to " +"obtain your certificates." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:56 +msgid "" +"First, set the environment variables ``SUPERLINK_IP`` and " +"``SUPEREXEC_IP`` with the IP address from the remote machine. 
For " +"example, if the IP is ``192.168.2.33``, execute:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:65 +msgid "Next, generate the self-signed certificates:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:72 +msgid "Step 2: Copy the Server Compose Files" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:74 +msgid "" +"Use the method that works best for you to copy the ``server`` directory, " +"the certificates, and your Flower project to the remote machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:77 +msgid "For example, you can use ``scp`` to copy the directories:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:87 +msgid "Step 3: Start the Flower Server Components" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:89 +msgid "" +"Log into the remote machine using ``ssh`` and run the following command " +"to start the SuperLink and SuperExec services:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:102 +msgid "" +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``server`` Docker Compose files." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:105 +msgid "Go back to your terminal on your local machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:108 +msgid "Step 4: Start the Flower Client Components" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:110 +msgid "" +"On your local machine, run the following command to start the client " +"components:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:120 +msgid "" +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``client`` Docker Compose files." 
+msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:124 +msgid "Step 5: Run Your Flower Project" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:126 +msgid "" +"Specify the remote SuperExec IP addresses and the path to the root " +"certificate in the ``[tool.flwr.federations.remote-superexec]`` table in " +"the ``pyproject.toml`` file. Here, we have named our remote federation " +"``remote-superexec``:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:130 +msgid "examples/quickstart-sklearn-tabular/pyproject.toml" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:139 +msgid "" +"The Path of the ``root-certificates`` should be relative to the location " +"of the ``pyproject.toml`` file." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:142 +msgid "To run the project, execute:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:148 +msgid "" +"That's it! With these steps, you've set up Flower on two separate " +"machines and are ready to start using it." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:152 +msgid "Step 6: Clean Up" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:154 +msgid "Shut down the Flower client components:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:161 +msgid "Shut down the Flower server components and delete the SuperLink state:" +msgstr "" + #: ../../source/docker/tutorial-quickstart-docker.rst:2 msgid "Quickstart with Docker" msgstr "" @@ -2981,12 +3178,7 @@ msgid "" " understanding the basic workflow that uses the minimum configurations." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:21 -#: ../../source/docker/tutorial-quickstart-docker.rst:19 -msgid "Step 1: Set Up" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:31 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:32 #: ../../source/docker/tutorial-quickstart-docker.rst:21 msgid "Create a new Flower project (PyTorch):" msgstr "" @@ -3007,7 +3199,7 @@ msgstr "" msgid "Step 2: Start the SuperLink" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:60 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:62 #: ../../source/docker/tutorial-quickstart-docker.rst:52 msgid "Open your terminal and run:" msgstr "" @@ -3033,8 +3225,8 @@ msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst:71 #: ../../source/docker/tutorial-quickstart-docker.rst:108 -#: ../../source/docker/tutorial-quickstart-docker.rst:215 -#: ../../source/docker/tutorial-quickstart-docker.rst:304 +#: ../../source/docker/tutorial-quickstart-docker.rst:219 +#: ../../source/docker/tutorial-quickstart-docker.rst:309 msgid "" "``--network flwr-network``: Make the container join the network named " "``flwr-network``." @@ -3046,8 +3238,8 @@ msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst:73 #: ../../source/docker/tutorial-quickstart-docker.rst:110 -#: ../../source/docker/tutorial-quickstart-docker.rst:216 -#: ../../source/docker/tutorial-quickstart-docker.rst:306 +#: ../../source/docker/tutorial-quickstart-docker.rst:220 +#: ../../source/docker/tutorial-quickstart-docker.rst:311 msgid "" "``--detach``: Run the container in the background, freeing up the " "terminal." @@ -3163,13 +3355,13 @@ msgid "" "extends the ClientApp image and installs the required dependencies." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:148 +#: ../../source/docker/tutorial-quickstart-docker.rst:149 msgid "" "Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste " "the following code into it:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:150 +#: ../../source/docker/tutorial-quickstart-docker.rst:152 msgid "Dockerfile.clientapp" msgstr "" @@ -3251,7 +3443,7 @@ msgstr "" msgid "the default command run when the container is started." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:184 +#: ../../source/docker/tutorial-quickstart-docker.rst:186 msgid "" "Note that `flwr `__ is already installed " "in the ``flwr/clientapp`` base image, so only other package dependencies " @@ -3260,20 +3452,20 @@ msgid "" "after it has been copied into the Docker image (see line 5)." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:189 +#: ../../source/docker/tutorial-quickstart-docker.rst:192 msgid "" "Next, build the ClientApp Docker image by running the following command " "in the directory where the Dockerfile is located:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:198 +#: ../../source/docker/tutorial-quickstart-docker.rst:201 msgid "" "The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. " "Remember that these values are merely examples, and you can customize " "them according to your requirements." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:201 +#: ../../source/docker/tutorial-quickstart-docker.rst:205 msgid "Start the first ClientApp container:" msgstr "" @@ -3293,33 +3485,33 @@ msgstr "" msgid "``supernode-1:9094``." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:222 +#: ../../source/docker/tutorial-quickstart-docker.rst:226 msgid "Start the second ClientApp container:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:233 +#: ../../source/docker/tutorial-quickstart-docker.rst:237 msgid "Step 5: Start the SuperExec" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:235 +#: ../../source/docker/tutorial-quickstart-docker.rst:239 msgid "" "The procedure for building and running a SuperExec image is almost " "identical to the ClientApp image." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:237 +#: ../../source/docker/tutorial-quickstart-docker.rst:242 msgid "" "Similar to the ClientApp image, you will need to create a Dockerfile that" " extends the SuperExec image and installs the required FAB dependencies." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:240 +#: ../../source/docker/tutorial-quickstart-docker.rst:245 msgid "" "Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste " "the following code in:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:242 +#: ../../source/docker/tutorial-quickstart-docker.rst:248 msgid "Dockerfile.superexec" msgstr "" @@ -3349,13 +3541,13 @@ msgstr "" msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:277 +#: ../../source/docker/tutorial-quickstart-docker.rst:283 msgid "" "Afterward, in the directory that holds the Dockerfile, execute this " "Docker command to build the SuperExec image:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:285 +#: ../../source/docker/tutorial-quickstart-docker.rst:290 msgid "Start the SuperExec container:" msgstr "" @@ -3369,7 +3561,7 @@ msgid "" "``http://localhost:9093``." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:305 +#: ../../source/docker/tutorial-quickstart-docker.rst:310 msgid "``--name superexec``: Assign the name ``superexec`` to the container." msgstr "" @@ -3389,75 +3581,75 @@ msgstr "" msgid "connect to the SuperLink running on port ``9091``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:315 +#: ../../source/docker/tutorial-quickstart-docker.rst:320 msgid "Step 6: Run the Quickstart Project" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:317 +#: ../../source/docker/tutorial-quickstart-docker.rst:322 msgid "Add the following lines to the ``pyproject.toml``:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:326 +#: ../../source/docker/tutorial-quickstart-docker.rst:331 msgid "Run the ``quickstart-docker`` project by executing the command:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:332 +#: ../../source/docker/tutorial-quickstart-docker.rst:337 msgid "Follow the SuperExec logs to track the execution of the run:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:339 +#: ../../source/docker/tutorial-quickstart-docker.rst:344 msgid "Step 7: Update the Application" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:341 +#: ../../source/docker/tutorial-quickstart-docker.rst:346 msgid "" -"Change the application code. For example, change the ``seed`` in " +"Change the application code. 
For example, change the ``seed`` in " "``quickstart_docker/task.py`` to ``43`` and save it:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:344 +#: ../../source/docker/tutorial-quickstart-docker.rst:349 msgid "quickstart_docker/task.py" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:351 +#: ../../source/docker/tutorial-quickstart-docker.rst:356 msgid "Stop the current ClientApp containers:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:357 +#: ../../source/docker/tutorial-quickstart-docker.rst:362 #, fuzzy msgid "Rebuild the FAB and ClientApp image:" msgstr "Construindo a imagem base" -#: ../../source/docker/tutorial-quickstart-docker.rst:363 +#: ../../source/docker/tutorial-quickstart-docker.rst:368 msgid "Launch two new ClientApp containers based on the newly built image:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:378 +#: ../../source/docker/tutorial-quickstart-docker.rst:383 msgid "Run the updated project:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:385 +#: ../../source/docker/tutorial-quickstart-docker.rst:390 msgid "Step 8: Clean Up" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:387 +#: ../../source/docker/tutorial-quickstart-docker.rst:392 msgid "Remove the containers and the bridge network:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:401 -#: ../../source/docker/tutorial-quickstart-docker.rst:399 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:408 +#: ../../source/docker/tutorial-quickstart-docker.rst:404 msgid "Where to Go Next" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:401 +#: ../../source/docker/tutorial-quickstart-docker.rst:406 msgid ":doc:`enable-tls`" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:402 +#: ../../source/docker/tutorial-quickstart-docker.rst:407 msgid ":doc:`persist-superlink-state`" msgstr "" -#: 
../../source/docker/tutorial-quickstart-docker.rst:403 +#: ../../source/docker/tutorial-quickstart-docker.rst:408 msgid ":doc:`tutorial-quickstart-docker-compose`" msgstr "" @@ -3479,176 +3671,176 @@ msgid "" "configuration that best suits your project's needs." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:23 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:24 msgid "Clone the Docker Compose ``complete`` directory:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:37 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:38 msgid "" "Export the path of the newly created project. The path should be relative" " to the location of the Docker Compose files:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:44 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:45 msgid "" "Setting the ``PROJECT_DIR`` helps Docker Compose locate the " "``pyproject.toml`` file, allowing it to install dependencies in the " "SuperExec and SuperNode images correctly." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:48 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:49 msgid "Step 2: Run Flower in Insecure Mode" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:50 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:51 msgid "" "To begin, start Flower with the most basic configuration. In this setup, " "Flower will run without TLS and without persisting the state." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:55 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:56 msgid "" "Without TLS, the data sent between the services remains **unencrypted**. " "Use it only for development purposes." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:58 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:59 msgid "" "For production-oriented use cases, :ref:`enable TLS` for secure data" " transmission." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:68 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:179 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:184 msgid "``docker compose``: The Docker command to run the Docker Compose tool." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:69 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:180 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 msgid "" "``-f compose.yml``: Specify the YAML file that contains the basic Flower " "service definitions." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:72 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:190 msgid "" "``--build``: Rebuild the images for each service if they don't already " "exist." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:186 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:73 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:191 msgid "" "``-d``: Detach the containers from the terminal and run them in the " "background." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:74 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 msgid "Step 3: Run the Quickstart Project" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:78 msgid "" "Now that the Flower services have been started via Docker Compose, it is " "time to run the quickstart example." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:79 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:81 msgid "" "To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify" " the SuperExec addresses in the ``pyproject.toml`` file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:82 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:232 msgid "Add the following lines to the ``quickstart-compose/pyproject.toml``:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:228 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:86 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:234 msgid "quickstart-compose/pyproject.toml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:91 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:93 msgid "Execute the command to run the quickstart example:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:97 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:99 msgid "Monitor the SuperExec logs and wait for the summary to appear:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:104 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 msgid "Step 4: Update the 
Application" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 msgid "In the next step, change the application code." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:110 msgid "" "For example, go to the ``task.py`` file in the ``quickstart-" "compose/quickstart_compose/`` directory and add a ``print`` call in the " "``get_weights`` function:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:111 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:114 msgid "quickstart-compose/quickstart_compose/task.py" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:120 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:125 msgid "Rebuild and restart the services." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:124 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 msgid "" "If you have modified the dependencies listed in your ``pyproject.toml`` " "file, it is essential to rebuild images." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:127 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:132 msgid "If you haven't made any changes, you can skip this step." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:134 msgid "Run the following command to rebuild and restart the services:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:135 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:140 msgid "Run the updated quickstart example:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:142 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:147 msgid "In the SuperExec logs, you should find the ``Get weights`` line:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:159 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:164 msgid "Step 5: Persisting the SuperLink State" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:161 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 msgid "" "In this step, Flower services are configured to persist the state of the " "SuperLink service, ensuring that it maintains its state even after a " "restart." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 msgid "" "When working with Docker Compose on Linux, you may need to create the " "``state`` directory first and change its ownership to ensure proper " "access and permissions." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:169 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:174 msgid "" "For more information, consult the following page: :doc:`persist-" "superlink-state`." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:220 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:176 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 msgid "Run the command:" msgstr "" @@ -3669,17 +3861,17 @@ msgid "" "rules>`_." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:188 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:193 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:247 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:375 msgid "Rerun the ``quickstart-compose`` project:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:194 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:199 msgid "Check the content of the ``state`` directory:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:201 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:206 msgid "" "You should see a ``state.db`` file in the ``state`` directory. If you " "restart the service, the state file will be used to restore the state " @@ -3687,123 +3879,119 @@ msgid "" "if the containers are stopped and started again." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:208 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:214 msgid "Step 6: Run Flower with TLS" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:210 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:216 msgid "" "To demonstrate how to enable TLS, generate self-signed certificates using" " the ``certs.yml`` Compose file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:215 -msgid "These certificates should be used only for development purposes." 
-msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:217 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:223 msgid "" "For production environments, use a service like `Let's Encrypt " "`_ to obtain your certificates." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:235 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 msgid "Restart the services with TLS enabled:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:249 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:255 msgid "Step 7: Add another SuperNode" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:251 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 msgid "" "You can add more SuperNodes and ClientApps by duplicating their " "definitions in the ``compose.yml`` file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:254 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:260 msgid "" "Just give each new SuperNode and ClientApp service a unique service name " "like ``supernode-3``, ``clientapp-3``, etc." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:263 msgid "In ``compose.yml``, add the following:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:259 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:265 msgid "compose.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:310 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:316 msgid "" "If you also want to enable TLS for the new SuperNodes, duplicate the " "SuperNode definition for each new SuperNode service in the ``with-" "tls.yml`` file." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:313 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:319 msgid "" "Make sure that the names of the services match with the one in the " "``compose.yml`` file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:315 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:321 msgid "In ``with-tls.yml``, add the following:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:317 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:323 msgid "with-tls.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:339 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:345 msgid "Step 8: Persisting the SuperLink State and Enabling TLS" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:341 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:347 msgid "" "To run Flower with persisted SuperLink state and enabled TLS, a slight " "change in the ``with-state.yml`` file is required:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:344 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:350 msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:346 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:352 msgid "with-state.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:363 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 msgid "Restart the services:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:377 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:383 msgid "Step 9: Merge Multiple Compose Files" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:379 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:385 msgid "" "You can merge 
multiple Compose files into a single file. For instance, if" " you wish to combine the basic configuration with the TLS configuration, " "execute the following command:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:387 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:394 msgid "" "This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" " a new file called ``my_compose.yml``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:391 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:398 msgid "Step 10: Clean Up" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:393 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:400 msgid "Remove all services and volumes:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:403 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:410 msgid ":doc:`run-quickstart-examples-docker-compose`" msgstr "" @@ -3818,7 +4006,7 @@ msgid "" " `Docker Hub `__." msgstr "" -#: ../../source/docker/use-a-different-version.rst:9 +#: ../../source/docker/use-a-different-version.rst:10 msgid "" "When using Flower nightly, the SuperLink nightly image must be paired " "with the corresponding SuperNode and ServerApp nightly images released on" @@ -3842,31 +4030,31 @@ msgid "" "centralized-to-federated>`." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:10 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:12 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 msgid "Centralized Training" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:14 msgid "" "All files are revised based on :doc:`Example: PyTorch - From Centralized " "To Federated `. 
The only " -"thing to do is modifying the file called :code:`cifar.py`, revised part " -"is shown below:" +"thing to do is modifying the file called ``cifar.py``, revised part is " +"shown below:" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:18 msgid "" "The model architecture defined in class Net() is added with Batch " "Normalization layers accordingly." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:41 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:157 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:171 msgid "You can now run your machine learning workload:" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 msgid "" "So far this should all look fairly familiar if you've used PyTorch " "before. Let's take the next step and use what we've built to create a " @@ -3874,53 +4062,53 @@ msgid "" " and two clients." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:167 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:58 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:182 msgid "Federated Training" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:60 msgid "" "If you have read :doc:`Example: PyTorch - From Centralized To Federated " "`, the following parts are" -" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " -"function in :code:`client.py` needed to revise. 
If not, please read the " -":doc:`Example: PyTorch - From Centralized To Federated `. first." +" easy to follow, only ``get_parameters`` and ``set_parameters`` function " +"in ``client.py`` needed to revise. If not, please read the :doc:`Example:" +" PyTorch - From Centralized To Federated `. first." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:66 msgid "" "Our example consists of one *server* and two *clients*. In FedBN, " -":code:`server.py` keeps unchanged, we can start the server directly." +"``server.py`` keeps unchanged, we can start the server directly." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:73 msgid "" -"Finally, we will revise our *client* logic by changing " -":code:`get_parameters` and :code:`set_parameters` in :code:`client.py`, " -"we will exclude batch normalization parameters from model parameter list " -"when sending to or receiving from the server." +"Finally, we will revise our *client* logic by changing ``get_parameters``" +" and ``set_parameters`` in ``client.py``, we will exclude batch " +"normalization parameters from model parameter list when sending to or " +"receiving from the server." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:102 msgid "Now, you can now open two additional terminal windows and run" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:108 msgid "" "in each window (make sure that the server is still running before you do " "so) and see your (previously centralized) PyTorch project run federated " "learning with FedBN strategy across two clients. Congratulations!" 
msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:310 -#: ../../source/tutorial-quickstart-jax.rst:283 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:113 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:349 +#: ../../source/tutorial-quickstart-jax.rst:319 msgid "Next Steps" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:115 msgid "" "The full source code for this example can be found `here " "`_." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:15 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:18 msgid "" -"Let's create a new file called :code:`cifar.py` with all the components " +"Let's create a new file called ``cifar.py`` with all the components " "required for a traditional (centralized) training on CIFAR-10. First, all" -" required packages (such as :code:`torch` and :code:`torchvision`) need " -"to be imported. You can see that we do not import any package for " -"federated learning. You can keep all these imports as they are even when " -"we add the federated learning components at a later point." +" required packages (such as ``torch`` and ``torchvision``) need to be " +"imported. You can see that we do not import any package for federated " +"learning. You can keep all these imports as they are even when we add the" +" federated learning components at a later point." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:32 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:36 msgid "" "As already mentioned we will use the CIFAR-10 dataset for this machine " "learning workload. The model architecture (a very simple Convolutional " -"Neural Network) is defined in :code:`class Net()`." 
+"Neural Network) is defined in ``class Net()``." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:62 msgid "" -"The :code:`load_data()` function loads the CIFAR-10 training and test " -"sets. The :code:`transform` normalized the data after loading." +"The ``load_data()`` function loads the CIFAR-10 training and test sets. " +"The ``transform`` normalized the data after loading." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:74 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:84 msgid "" -"We now need to define the training (function :code:`train()`) which loops" -" over the training set, measures the loss, backpropagates it, and then " +"We now need to define the training (function ``train()``) which loops " +"over the training set, measures the loss, backpropagates it, and then " "takes one optimizer step for each batch of training examples." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:76 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:88 msgid "" -"The evaluation of the model is defined in the function :code:`test()`. " -"The function loops over all test samples and measures the loss of the " -"model based on the test dataset." +"The evaluation of the model is defined in the function ``test()``. The " +"function loops over all test samples and measures the loss of the model " +"based on the test dataset." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:136 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:149 msgid "" "Having defined the data loading, model architecture, training, and " "evaluation we can put everything together and train our CNN on CIFAR-10." 
msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:163 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:177 msgid "" "So far, this should all look fairly familiar if you've used PyTorch " "before. Let's take the next step and use what we've built to create a " @@ -4006,7 +4194,7 @@ msgid "" "clients." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:169 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:184 msgid "" "The simple machine learning project discussed in the previous section " "trains the model on a single dataset (CIFAR-10), we call this centralized" @@ -4017,162 +4205,161 @@ msgid "" "everything up from scratch. This can be a considerable effort." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:173 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 msgid "" "However, with Flower you can evolve your pre-existing code into a " "federated learning setup without the need for a major rewrite." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:175 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:194 msgid "" "The concept is easy to understand. We have to start a *server* and then " -"use the code in :code:`cifar.py` for the *clients* that are connected to " -"the *server*. The *server* sends model parameters to the clients. The " +"use the code in ``cifar.py`` for the *clients* that are connected to the " +"*server*. The *server* sends model parameters to the clients. The " "*clients* run the training and update the parameters. The updated " "parameters are sent back to the *server* which averages all received " "parameter updates. This describes one round of the federated learning " "process and we repeat this for multiple rounds." 
msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:181 -#: ../../source/tutorial-quickstart-jax.rst:129 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:201 +#: ../../source/tutorial-quickstart-jax.rst:147 msgid "" "Our example consists of one *server* and two *clients*. Let's set up " -":code:`server.py` first. The *server* needs to import the Flower package " -":code:`flwr`. Next, we use the :code:`start_server` function to start a " -"server and tell it to perform three rounds of federated learning." +"``server.py`` first. The *server* needs to import the Flower package " +"``flwr``. Next, we use the ``start_server`` function to start a server " +"and tell it to perform three rounds of federated learning." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 -#: ../../source/tutorial-quickstart-jax.rst:139 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:215 +#: ../../source/tutorial-quickstart-jax.rst:161 msgid "We can already start the *server*:" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:197 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined centralized training in :code:`cifar.py`. " -"Our *client* needs to import :code:`flwr`, but also :code:`torch` to " -"update the parameters on our PyTorch model:" +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined centralized training in ``cifar.py``. 
Our " +"*client* needs to import ``flwr``, but also ``torch`` to update the " +"parameters on our PyTorch model:" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:213 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:238 msgid "" "Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" -" to implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " -"some of the boilerplate that would otherwise be necessary. " -":code:`CifarClient` needs to implement four methods, two methods for " -"getting/setting model parameters, one method for training the model, and " -"one method for testing the model:" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``CifarClient``. ``NumPyClient`` is slightly easier to implement " +"than ``Client`` if you use a framework with good NumPy interoperability " +"(like PyTorch or TensorFlow/Keras) because it avoids some of the " +"boilerplate that would otherwise be necessary. 
``CifarClient`` needs to " +"implement four methods, two methods for getting/setting model parameters," +" one method for training the model, and one method for testing the model:" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -msgid ":code:`set_parameters`" -msgstr "" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#, fuzzy +msgid "``set_parameters``" +msgstr "``SETUPTOOLS_VERSION``" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -#: ../../source/tutorial-quickstart-jax.rst:166 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:248 +#: ../../source/tutorial-quickstart-jax.rst:192 msgid "" "set the model parameters on the local model that are received from the " "server" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:220 -#: ../../source/tutorial-quickstart-jax.rst:168 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#: ../../source/tutorial-quickstart-jax.rst:194 msgid "" -"loop over the list of model parameters received as NumPy " -":code:`ndarray`'s (think list of neural network layers)" +"loop over the list of model parameters received as NumPy ``ndarray``'s " +"(think list of neural network layers)" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 -#: ../../source/tutorial-quickstart-jax.rst:169 -#: ../../source/tutorial-quickstart-scikitlearn.rst:118 -msgid ":code:`get_parameters`" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 +#: ../../source/tutorial-quickstart-scikitlearn.rst:129 +msgid "``get_parameters``" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:222 -#: ../../source/tutorial-quickstart-jax.rst:170 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 msgid "" -"get the model parameters and return them 
as a list of NumPy " -":code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" +"get the model parameters and return them as a list of NumPy ``ndarray``'s" +" (which is what ``flwr.client.NumPyClient`` expects)" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid ":code:`fit`" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 +msgid "``fit``" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:224 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:228 -#: ../../source/tutorial-quickstart-jax.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:176 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:255 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:260 +#: ../../source/tutorial-quickstart-jax.rst:200 +#: ../../source/tutorial-quickstart-jax.rst:205 msgid "" "update the parameters of the local model with the parameters received " "from the server" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 msgid "train the model on the local training set" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:226 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:258 msgid "get the updated local model weights and return them to the server" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -#: ../../source/tutorial-quickstart-jax.rst:178 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid ":code:`evaluate`" +#: 
../../source/example-pytorch-from-centralized-to-federated.rst:263 +#: ../../source/tutorial-quickstart-jax.rst:208 +#: ../../source/tutorial-quickstart-scikitlearn.rst:139 +msgid "``evaluate``" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:229 -#: ../../source/tutorial-quickstart-jax.rst:177 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:262 +#: ../../source/tutorial-quickstart-jax.rst:207 msgid "evaluate the updated model on the local test set" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 msgid "return the local loss and accuracy to the server" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:232 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:265 msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`test()` previously " -"defined in :code:`cifar.py`. So what we really do here is we tell Flower " -"through our :code:`NumPyClient` subclass which of our already defined " -"functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``test()`` previously defined in ``cifar.py``. " +"So what we really do here is we tell Flower through our ``NumPyClient`` " +"subclass which of our already defined functions to call for training and " +"evaluation. We included type annotations to give you a better " +"understanding of the data types that get passed around." 
msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:280 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:315 msgid "" "All that's left to do it to define a function that loads both model and " -"data, creates a :code:`CifarClient`, and starts this client. You load " -"your data and model by using :code:`cifar.py`. Start :code:`CifarClient` " -"with the function :code:`fl.client.start_client()` by pointing it at the " -"same IP address we used in :code:`server.py`:" +"data, creates a ``CifarClient``, and starts this client. You load your " +"data and model by using ``cifar.py``. Start ``CifarClient`` with the " +"function ``fl.client.start_client()`` by pointing it at the same IP " +"address we used in ``server.py``:" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:301 -#: ../../source/tutorial-quickstart-jax.rst:274 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:338 +#: ../../source/tutorial-quickstart-jax.rst:309 msgid "And that's it. You can now open two additional terminal windows and run" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:307 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:344 msgid "" "in each window (make sure that the server is running before you do so) " "and see your (previously centralized) PyTorch project run federated " "learning across two clients. Congratulations!" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:312 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:351 msgid "" "The full source code for this example: `PyTorch: From Centralized To " "Federated (Code) SuperNode communication. You can " "find the complete guide `here `_. After configuring secure connections, you" -" can enable client authentication in a long-running Flower " -":code:`SuperLink`. 
Use the following terminal command to start a Flower " -":code:`SuperNode` that has both secure connections and node " -"authentication enabled:" +" can enable client authentication in a long-running Flower ``SuperLink``." +" Use the following terminal command to start a Flower ``SuperNode`` that " +"has both secure connections and node authentication enabled:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:38 +#: ../../source/how-to-authenticate-supernodes.rst:47 msgid "Let's break down the authentication flags:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:40 +#: ../../source/how-to-authenticate-supernodes.rst:49 msgid "" -"The first flag :code:`--auth-list-public-keys` expects a path to a CSV " -"file storing all known node public keys. You need to store all known node" -" public keys that are allowed to participate in a federation in one CSV " -"file (:code:`.csv`)." +"The first flag ``--auth-list-public-keys`` expects a path to a CSV file " +"storing all known node public keys. You need to store all known node " +"public keys that are allowed to participate in a federation in one CSV " +"file (``.csv``)." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:42 +#: ../../source/how-to-authenticate-supernodes.rst:53 msgid "" "A valid CSV file storing known node public keys should list the keys in " "OpenSSH format, separated by commas and without any comments. For an " @@ -5413,15 +5593,15 @@ msgid "" "known node public keys." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:44 +#: ../../source/how-to-authenticate-supernodes.rst:57 msgid "" -"The second and third flags :code:`--auth-superlink-private-key` and :code" -":`--auth-superlink-public-key` expect paths to the server's private and " -"public keys. For development purposes, you can generate a private and " -"public key pair using :code:`ssh-keygen -t ecdsa -b 384`." 
+"The second and third flags ``--auth-superlink-private-key`` and ``--auth-" +"superlink-public-key`` expect paths to the server's private and public " +"keys. For development purposes, you can generate a private and public key" +" pair using ``ssh-keygen -t ecdsa -b 384``." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:47 +#: ../../source/how-to-authenticate-supernodes.rst:64 msgid "" "In Flower 1.9, there is no support for dynamically removing, editing, or " "adding known node public keys to the SuperLink. To change the set of " @@ -5430,32 +5610,32 @@ msgid "" " nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:53 -msgid "Enable node authentication in :code:`SuperNode`" +#: ../../source/how-to-authenticate-supernodes.rst:71 +msgid "Enable node authentication in ``SuperNode``" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:55 +#: ../../source/how-to-authenticate-supernodes.rst:73 msgid "" -"Similar to the long-running Flower server (:code:`SuperLink`), you can " -"easily enable node authentication in the long-running Flower client " -"(:code:`SuperNode`). Use the following terminal command to start an " -"authenticated :code:`SuperNode`:" +"Similar to the long-running Flower server (``SuperLink``), you can easily" +" enable node authentication in the long-running Flower client " +"(``SuperNode``). Use the following terminal command to start an " +"authenticated ``SuperNode``:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:66 +#: ../../source/how-to-authenticate-supernodes.rst:85 msgid "" -"The :code:`--auth-supernode-private-key` flag expects a path to the " -"node's private key file and the :code:`--auth-supernode-public-key` flag " -"expects a path to the node's public key file. For development purposes, " -"you can generate a private and public key pair using :code:`ssh-keygen -t" -" ecdsa -b 384`." 
+"The ``--auth-supernode-private-key`` flag expects a path to the node's " +"private key file and the ``--auth-supernode-public-key`` flag expects a " +"path to the node's public key file. For development purposes, you can " +"generate a private and public key pair using ``ssh-keygen -t ecdsa -b " +"384``." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:70 +#: ../../source/how-to-authenticate-supernodes.rst:91 msgid "Security notice" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:72 +#: ../../source/how-to-authenticate-supernodes.rst:93 msgid "" "The system's security relies on the credentials of the SuperLink and each" " SuperNode. Therefore, it is imperative to safeguard and safely store the" @@ -5466,19 +5646,19 @@ msgid "" "methods." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:77 -#: ../../source/how-to-enable-ssl-connections.rst:68 -#: ../../source/how-to-use-built-in-mods.rst:85 +#: ../../source/how-to-authenticate-supernodes.rst:100 +#: ../../source/how-to-enable-ssl-connections.rst:71 +#: ../../source/how-to-use-built-in-mods.rst:95 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 msgid "Conclusion" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:79 +#: ../../source/how-to-authenticate-supernodes.rst:102 msgid "" "You should now have learned how to start a long-running Flower server " -"(:code:`SuperLink`) and client (:code:`SuperNode`) with node " -"authentication enabled. You should also know the significance of the " -"private key and store it safely to minimize security risks." +"(``SuperLink``) and client (``SuperNode``) with node authentication " +"enabled. You should also know the significance of the private key and " +"store it safely to minimize security risks." msgstr "" #: ../../source/how-to-configure-clients.rst:2 @@ -5493,11 +5673,11 @@ msgid "" "the server." 
msgstr "" -#: ../../source/how-to-configure-clients.rst:7 +#: ../../source/how-to-configure-clients.rst:9 msgid "Configuration values" msgstr "" -#: ../../source/how-to-configure-clients.rst:9 +#: ../../source/how-to-configure-clients.rst:11 msgid "" "Configuration values are represented as a dictionary with ``str`` keys " "and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " @@ -5505,14 +5685,14 @@ msgid "" " Here is an example of a configuration dictionary in Python:" msgstr "" -#: ../../source/how-to-configure-clients.rst:20 +#: ../../source/how-to-configure-clients.rst:25 msgid "" "Flower serializes these configuration dictionaries (or *config dict* for " "short) to their ProtoBuf representation, transports them to the client " "using gRPC, and then deserializes them back to Python dictionaries." msgstr "" -#: ../../source/how-to-configure-clients.rst:24 +#: ../../source/how-to-configure-clients.rst:31 msgid "" "Currently, there is no support for directly sending collection types " "(e.g., ``Set``, ``List``, ``Map``) as values in configuration " @@ -5521,7 +5701,7 @@ msgid "" "them back on the client-side)." msgstr "" -#: ../../source/how-to-configure-clients.rst:26 +#: ../../source/how-to-configure-clients.rst:36 msgid "" "One can, for example, convert a list of floating-point numbers to a JSON " "string, then send the JSON string using the configuration dictionary, and" @@ -5529,21 +5709,21 @@ msgid "" " the client." msgstr "" -#: ../../source/how-to-configure-clients.rst:30 +#: ../../source/how-to-configure-clients.rst:41 msgid "Configuration through built-in strategies" msgstr "" -#: ../../source/how-to-configure-clients.rst:32 +#: ../../source/how-to-configure-clients.rst:43 msgid "" "The easiest way to send configuration values to clients is to use a " -"built-in strategy like :code:`FedAvg`. Built-in strategies support so-" -"called configuration functions. 
A configuration function is a function " -"that the built-in strategy calls to get the configuration dictionary for " -"the current round. It then forwards the configuration dictionary to all " -"the clients selected during that round." +"built-in strategy like ``FedAvg``. Built-in strategies support so-called " +"configuration functions. A configuration function is a function that the " +"built-in strategy calls to get the configuration dictionary for the " +"current round. It then forwards the configuration dictionary to all the " +"clients selected during that round." msgstr "" -#: ../../source/how-to-configure-clients.rst:34 +#: ../../source/how-to-configure-clients.rst:49 msgid "" "Let's start with a simple example. Imagine we want to send (a) the batch " "size that the client should use, (b) the current global round of " @@ -5551,18 +5731,18 @@ msgid "" "side. Our configuration function could look like this:" msgstr "" -#: ../../source/how-to-configure-clients.rst:47 +#: ../../source/how-to-configure-clients.rst:65 msgid "" "To make the built-in strategies use this function, we can pass it to " "``FedAvg`` during initialization using the parameter " -":code:`on_fit_config_fn`:" +"``on_fit_config_fn``:" msgstr "" -#: ../../source/how-to-configure-clients.rst:56 +#: ../../source/how-to-configure-clients.rst:75 msgid "One the client side, we receive the configuration dictionary in ``fit``:" msgstr "" -#: ../../source/how-to-configure-clients.rst:67 +#: ../../source/how-to-configure-clients.rst:86 msgid "" "There is also an `on_evaluate_config_fn` to configure evaluation, which " "works the same way. They are separate functions because one might want to" @@ -5570,7 +5750,7 @@ msgid "" " different batch size)." 
msgstr "" -#: ../../source/how-to-configure-clients.rst:69 +#: ../../source/how-to-configure-clients.rst:90 msgid "" "The built-in strategies call this function every round (that is, every " "time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " @@ -5580,29 +5760,29 @@ msgid "" "epochs during later rounds, we could do the following:" msgstr "" -#: ../../source/how-to-configure-clients.rst:82 -msgid "The :code:`FedAvg` strategy will call this function *every round*." +#: ../../source/how-to-configure-clients.rst:107 +msgid "The ``FedAvg`` strategy will call this function *every round*." msgstr "" -#: ../../source/how-to-configure-clients.rst:85 +#: ../../source/how-to-configure-clients.rst:110 msgid "Configuring individual clients" msgstr "" -#: ../../source/how-to-configure-clients.rst:87 +#: ../../source/how-to-configure-clients.rst:112 msgid "" "In some cases, it is necessary to send different configuration values to " "different clients." msgstr "" -#: ../../source/how-to-configure-clients.rst:89 +#: ../../source/how-to-configure-clients.rst:115 msgid "" "This can be achieved by customizing an existing strategy or by " ":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes :code:`FedAvg`" -" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" -" to the config dict of a *single client* (only the first client in the " -"list, the other clients in this round to not receive this \"special\" " -"config value):" +"strategies>`. 
Here's a nonsensical example that customizes ``FedAvg`` by " +"adding a custom ``\"hello\": \"world\"`` configuration key/value pair to " +"the config dict of a *single client* (only the first client in the list, " +"the other clients in this round to not receive this \"special\" config " +"value):" msgstr "" #: ../../source/how-to-configure-logging.rst:2 @@ -5619,68 +5799,67 @@ msgstr "" #: ../../source/how-to-configure-logging.rst:13 msgid "" "containing relevant information including: log message level (e.g. " -":code:`INFO`, :code:`DEBUG`), a timestamp, the line where the logging " -"took place from, as well as the log message itself. In this way, the " -"logger would typically display information on your terminal as follows:" +"``INFO``, ``DEBUG``), a timestamp, the line where the logging took place " +"from, as well as the log message itself. In this way, the logger would " +"typically display information on your terminal as follows:" msgstr "" -#: ../../source/how-to-configure-logging.rst:34 +#: ../../source/how-to-configure-logging.rst:35 msgid "Saving log to file" msgstr "" -#: ../../source/how-to-configure-logging.rst:36 +#: ../../source/how-to-configure-logging.rst:37 msgid "" "By default, the Flower log is outputted to the terminal where you launch " "your Federated Learning workload from. This applies for both gRPC-based " -"federation (i.e. when you do :code:`fl.server.start_server`) and when " -"using the :code:`VirtualClientEngine` (i.e. when you do " -":code:`fl.simulation.start_simulation`). In some situations you might " -"want to save this log to disk. You can do so by calling the " +"federation (i.e. when you do ``fl.server.start_server``) and when using " +"the ``VirtualClientEngine`` (i.e. when you do " +"``fl.simulation.start_simulation``). In some situations you might want to" +" save this log to disk. You can do so by calling the " "`fl.common.logger.configure() " "`_" " function. 
For example:" msgstr "" -#: ../../source/how-to-configure-logging.rst:53 +#: ../../source/how-to-configure-logging.rst:59 msgid "" "With the above, Flower will record the log you see on your terminal to " -":code:`log.txt`. This file will be created in the same directory as were " -"you are running the code from. If we inspect we see the log above is also" -" recorded but prefixing with :code:`identifier` each line:" +"``log.txt``. This file will be created in the same directory as were you " +"are running the code from. If we inspect we see the log above is also " +"recorded but prefixing with ``identifier`` each line:" msgstr "" -#: ../../source/how-to-configure-logging.rst:74 +#: ../../source/how-to-configure-logging.rst:81 msgid "Log your own messages" msgstr "" -#: ../../source/how-to-configure-logging.rst:76 +#: ../../source/how-to-configure-logging.rst:83 msgid "" "You might expand the information shown by default with the Flower logger " "by adding more messages relevant to your application. You can achieve " "this easily as follows." msgstr "" -#: ../../source/how-to-configure-logging.rst:102 +#: ../../source/how-to-configure-logging.rst:114 msgid "" "In this way your logger will show, in addition to the default messages, " "the ones introduced by the clients as specified above." msgstr "" -#: ../../source/how-to-configure-logging.rst:128 +#: ../../source/how-to-configure-logging.rst:140 msgid "Log to a remote service" msgstr "" -#: ../../source/how-to-configure-logging.rst:130 +#: ../../source/how-to-configure-logging.rst:142 msgid "" -"The :code:`fl.common.logger.configure` function, also allows specifying a" -" host to which logs can be pushed (via :code:`POST`) through a native " -"Python :code:`logging.handler.HTTPHandler`. This is a particularly useful" -" feature in :code:`gRPC`-based Federated Learning workloads where " -"otherwise gathering logs from all entities (i.e. the server and the " -"clients) might be cumbersome. 
Note that in Flower simulation, the server " -"automatically displays all logs. You can still specify a " -":code:`HTTPHandler` should you wish to backup or analyze the logs " -"somewhere else." +"The ``fl.common.logger.configure`` function, also allows specifying a " +"host to which logs can be pushed (via ``POST``) through a native Python " +"``logging.handler.HTTPHandler``. This is a particularly useful feature in" +" ``gRPC``-based Federated Learning workloads where otherwise gathering " +"logs from all entities (i.e. the server and the clients) might be " +"cumbersome. Note that in Flower simulation, the server automatically " +"displays all logs. You can still specify a ``HTTPHandler`` should you " +"wish to backup or analyze the logs somewhere else." msgstr "" #: ../../source/how-to-enable-ssl-connections.rst:2 @@ -5690,21 +5869,21 @@ msgstr "" #: ../../source/how-to-enable-ssl-connections.rst:4 msgid "" "This guide describes how to a SSL-enabled secure Flower server " -"(:code:`SuperLink`) can be started and how a Flower client " -"(:code:`SuperNode`) can establish a secure connections to it." +"(``SuperLink``) can be started and how a Flower client (``SuperNode``) " +"can establish a secure connections to it." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:7 +#: ../../source/how-to-enable-ssl-connections.rst:8 msgid "" "A complete code example demonstrating a secure connection can be found " "`here `_." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:10 +#: ../../source/how-to-enable-ssl-connections.rst:11 msgid "" -"The code example comes with a :code:`README.md` file which explains how " -"to start it. Although it is already SSL-enabled, it might be less " +"The code example comes with a ``README.md`` file which explains how to " +"start it. Although it is already SSL-enabled, it might be less " "descriptive on how it does so. Stick to this guide for a deeper " "introduction to the topic." 
msgstr "" @@ -5718,17 +5897,18 @@ msgid "" "Using SSL-enabled connections requires certificates to be passed to the " "server and client. For the purpose of this guide we are going to generate" " self-signed certificates. As this can become quite complex we are going " -"to ask you to run the script in :code:`examples/advanced-" -"tensorflow/certificates/generate.sh` with the following command sequence:" +"to ask you to run the script in ``examples/advanced-" +"tensorflow/certificates/generate.sh`` with the following command " +"sequence:" msgstr "" #: ../../source/how-to-enable-ssl-connections.rst:29 msgid "" -"This will generate the certificates in :code:`examples/advanced-" -"tensorflow/.cache/certificates`." +"This will generate the certificates in ``examples/advanced-" +"tensorflow/.cache/certificates``." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:31 +#: ../../source/how-to-enable-ssl-connections.rst:32 msgid "" "The approach for generating SSL certificates in the context of this " "example can serve as an inspiration and starting point, but it should not" @@ -5739,61 +5919,61 @@ msgid "" "generated using the scripts mentioned in this guide." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:39 +#: ../../source/how-to-enable-ssl-connections.rst:40 msgid "Server (SuperLink)" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:41 +#: ../../source/how-to-enable-ssl-connections.rst:42 msgid "" "Use the following terminal command to start a sever (SuperLink) that uses" " the previously generated certificates:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:50 +#: ../../source/how-to-enable-ssl-connections.rst:52 msgid "" "When providing certificates, the server expects a tuple of three " "certificates paths: CA certificate, server certificate and server private" " key." 
msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:54 +#: ../../source/how-to-enable-ssl-connections.rst:56 msgid "Client (SuperNode)" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:56 +#: ../../source/how-to-enable-ssl-connections.rst:58 msgid "" "Use the following terminal command to start a client (SuperNode) that " "uses the previously generated certificates:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:64 +#: ../../source/how-to-enable-ssl-connections.rst:67 msgid "" -"When setting :code:`root_certificates`, the client expects a file path to" -" PEM-encoded root certificates." +"When setting ``root_certificates``, the client expects a file path to " +"PEM-encoded root certificates." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:70 +#: ../../source/how-to-enable-ssl-connections.rst:73 msgid "" "You should now have learned how to generate self-signed certificates " "using the given script, start an SSL-enabled server and have a client " "establish a secure connection to it." 
msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:75 +#: ../../source/how-to-enable-ssl-connections.rst:78 msgid "Additional resources" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:77 +#: ../../source/how-to-enable-ssl-connections.rst:80 msgid "" "These additional sources might be relevant if you would like to dive " "deeper into the topic of certificates:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:79 +#: ../../source/how-to-enable-ssl-connections.rst:83 msgid "`Let's Encrypt `_" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:80 +#: ../../source/how-to-enable-ssl-connections.rst:84 msgid "`certbot `_" msgstr "" @@ -5812,13 +5992,13 @@ msgid "" msgstr "" #: ../../source/how-to-implement-strategies.rst:11 -msgid "The :code:`Strategy` abstraction" +msgid "The ``Strategy`` abstraction" msgstr "" #: ../../source/how-to-implement-strategies.rst:13 msgid "" "All strategy implementation are derived from the abstract base class " -":code:`flwr.server.strategy.Strategy`, both built-in implementations and " +"``flwr.server.strategy.Strategy``, both built-in implementations and " "third party implementations. This means that custom strategy " "implementations have the exact same capabilities at their disposal as " "built-in ones." 
@@ -5830,52 +6010,52 @@ msgid "" "implemented:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:75 +#: ../../source/how-to-implement-strategies.rst:67 msgid "" -"Creating a new strategy means implementing a new :code:`class` (derived " -"from the abstract base class :code:`Strategy`) that implements for the " -"previously shown abstract methods:" +"Creating a new strategy means implementing a new ``class`` (derived from " +"the abstract base class ``Strategy``) that implements for the previously " +"shown abstract methods:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:100 +#: ../../source/how-to-implement-strategies.rst:97 msgid "The Flower server calls these methods in the following order:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:177 +#: ../../source/how-to-implement-strategies.rst:174 msgid "The following sections describe each of those methods in more detail." msgstr "" -#: ../../source/how-to-implement-strategies.rst:180 -msgid "The :code:`initialize_parameters` method" +#: ../../source/how-to-implement-strategies.rst:177 +msgid "The ``initialize_parameters`` method" msgstr "" -#: ../../source/how-to-implement-strategies.rst:182 +#: ../../source/how-to-implement-strategies.rst:179 msgid "" -":code:`initialize_parameters` is called only once, at the very beginning " -"of an execution. It is responsible for providing the initial global model" -" parameters in a serialized form (i.e., as a :code:`Parameters` object)." +"``initialize_parameters`` is called only once, at the very beginning of " +"an execution. It is responsible for providing the initial global model " +"parameters in a serialized form (i.e., as a ``Parameters`` object)." msgstr "" -#: ../../source/how-to-implement-strategies.rst:184 +#: ../../source/how-to-implement-strategies.rst:183 msgid "" "Built-in strategies return user-provided initial parameters. 
The " "following example shows how initial parameters can be passed to " -":code:`FedAvg`:" +"``FedAvg``:" msgstr "" #: ../../source/how-to-implement-strategies.rst:209 msgid "" -"The Flower server will call :code:`initialize_parameters`, which either " -"returns the parameters that were passed to :code:`initial_parameters`, or" -" :code:`None`. If no parameters are returned from " -":code:`initialize_parameters` (i.e., :code:`None`), the server will " -"randomly select one client and ask it to provide its parameters. This is " -"a convenience feature and not recommended in practice, but it can be " -"useful for prototyping. In practice, it is recommended to always use " -"server-side parameter initialization." +"The Flower server will call ``initialize_parameters``, which either " +"returns the parameters that were passed to ``initial_parameters``, or " +"``None``. If no parameters are returned from ``initialize_parameters`` " +"(i.e., ``None``), the server will randomly select one client and ask it " +"to provide its parameters. This is a convenience feature and not " +"recommended in practice, but it can be useful for prototyping. In " +"practice, it is recommended to always use server-side parameter " +"initialization." msgstr "" -#: ../../source/how-to-implement-strategies.rst:213 +#: ../../source/how-to-implement-strategies.rst:218 msgid "" "Server-side parameter initialization is a powerful mechanism. It can be " "used, for example, to resume training from a previously saved checkpoint." @@ -5884,281 +6064,275 @@ msgid "" " learning." msgstr "" -#: ../../source/how-to-implement-strategies.rst:216 -msgid "The :code:`configure_fit` method" +#: ../../source/how-to-implement-strategies.rst:224 +msgid "The ``configure_fit`` method" msgstr "" -#: ../../source/how-to-implement-strategies.rst:218 +#: ../../source/how-to-implement-strategies.rst:226 msgid "" -":code:`configure_fit` is responsible for configuring the upcoming round " -"of training. 
What does *configure* mean in this context? Configuring a " -"round means selecting clients and deciding what instructions to send to " -"these clients. The signature of :code:`configure_fit` makes this clear:" +"``configure_fit`` is responsible for configuring the upcoming round of " +"training. What does *configure* mean in this context? Configuring a round" +" means selecting clients and deciding what instructions to send to these " +"clients. The signature of ``configure_fit`` makes this clear:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:231 +#: ../../source/how-to-implement-strategies.rst:239 msgid "" "The return value is a list of tuples, each representing the instructions " "that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_fit`:" +"usually perform the following steps in ``configure_fit``:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:233 -#: ../../source/how-to-implement-strategies.rst:280 +#: ../../source/how-to-implement-strategies.rst:243 +#: ../../source/how-to-implement-strategies.rst:307 msgid "" -"Use the :code:`client_manager` to randomly sample all (or a subset of) " -"available clients (each represented as a :code:`ClientProxy` object)" +"Use the ``client_manager`` to randomly sample all (or a subset of) " +"available clients (each represented as a ``ClientProxy`` object)" msgstr "" -#: ../../source/how-to-implement-strategies.rst:234 +#: ../../source/how-to-implement-strategies.rst:245 msgid "" -"Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " -"current global model :code:`parameters` and :code:`config` dict" +"Pair each ``ClientProxy`` with the same ``FitIns`` holding the current " +"global model ``parameters`` and ``config`` dict" msgstr "" -#: ../../source/how-to-implement-strategies.rst:236 +#: ../../source/how-to-implement-strategies.rst:248 msgid "" -"More sophisticated implementations can use 
:code:`configure_fit` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_fit`." +"More sophisticated implementations can use ``configure_fit`` to implement" +" custom client selection logic. A client will only participate in a round" +" if the corresponding ``ClientProxy`` is included in the list returned " +"from ``configure_fit``." msgstr "" -#: ../../source/how-to-implement-strategies.rst:240 +#: ../../source/how-to-implement-strategies.rst:254 msgid "" "The structure of this return value provides a lot of flexibility to the " "user. Since instructions are defined on a per-client basis, different " "instructions can be sent to each client. This enables custom strategies " "to train, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." +"different hyperparameters on different clients (via the ``config`` dict)." msgstr "" -#: ../../source/how-to-implement-strategies.rst:243 -msgid "The :code:`aggregate_fit` method" +#: ../../source/how-to-implement-strategies.rst:261 +msgid "The ``aggregate_fit`` method" msgstr "" -#: ../../source/how-to-implement-strategies.rst:245 +#: ../../source/how-to-implement-strategies.rst:263 msgid "" -":code:`aggregate_fit` is responsible for aggregating the results returned" -" by the clients that were selected and asked to train in " -":code:`configure_fit`." +"``aggregate_fit`` is responsible for aggregating the results returned by " +"the clients that were selected and asked to train in ``configure_fit``." msgstr "" -#: ../../source/how-to-implement-strategies.rst:258 +#: ../../source/how-to-implement-strategies.rst:277 msgid "" "Of course, failures can happen, so there is no guarantee that the server " "will get results from all the clients it sent instructions to (via " -":code:`configure_fit`). 
:code:`aggregate_fit` therefore receives a list " -"of :code:`results`, but also a list of :code:`failures`." +"``configure_fit``). ``aggregate_fit`` therefore receives a list of " +"``results``, but also a list of ``failures``." msgstr "" -#: ../../source/how-to-implement-strategies.rst:260 +#: ../../source/how-to-implement-strategies.rst:282 msgid "" -":code:`aggregate_fit` returns an optional :code:`Parameters` object and a" -" dictionary of aggregated metrics. The :code:`Parameters` return value is" -" optional because :code:`aggregate_fit` might decide that the results " -"provided are not sufficient for aggregation (e.g., too many failures)." +"``aggregate_fit`` returns an optional ``Parameters`` object and a " +"dictionary of aggregated metrics. The ``Parameters`` return value is " +"optional because ``aggregate_fit`` might decide that the results provided" +" are not sufficient for aggregation (e.g., too many failures)." msgstr "" -#: ../../source/how-to-implement-strategies.rst:263 -msgid "The :code:`configure_evaluate` method" +#: ../../source/how-to-implement-strategies.rst:288 +msgid "The ``configure_evaluate`` method" msgstr "" -#: ../../source/how-to-implement-strategies.rst:265 +#: ../../source/how-to-implement-strategies.rst:290 msgid "" -":code:`configure_evaluate` is responsible for configuring the upcoming " -"round of evaluation. What does *configure* mean in this context? " -"Configuring a round means selecting clients and deciding what " -"instructions to send to these clients. The signature of " -":code:`configure_evaluate` makes this clear:" +"``configure_evaluate`` is responsible for configuring the upcoming round " +"of evaluation. What does *configure* mean in this context? Configuring a " +"round means selecting clients and deciding what instructions to send to " +"these clients. 
The signature of ``configure_evaluate`` makes this clear:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:278 +#: ../../source/how-to-implement-strategies.rst:303 msgid "" "The return value is a list of tuples, each representing the instructions " "that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_evaluate`:" +"usually perform the following steps in ``configure_evaluate``:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:281 +#: ../../source/how-to-implement-strategies.rst:309 msgid "" -"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding " -"the current global model :code:`parameters` and :code:`config` dict" +"Pair each ``ClientProxy`` with the same ``EvaluateIns`` holding the " +"current global model ``parameters`` and ``config`` dict" msgstr "" -#: ../../source/how-to-implement-strategies.rst:283 +#: ../../source/how-to-implement-strategies.rst:312 msgid "" -"More sophisticated implementations can use :code:`configure_evaluate` to " +"More sophisticated implementations can use ``configure_evaluate`` to " "implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_evaluate`." +"in a round if the corresponding ``ClientProxy`` is included in the list " +"returned from ``configure_evaluate``." msgstr "" -#: ../../source/how-to-implement-strategies.rst:287 +#: ../../source/how-to-implement-strategies.rst:318 msgid "" "The structure of this return value provides a lot of flexibility to the " "user. Since instructions are defined on a per-client basis, different " "instructions can be sent to each client. This enables custom strategies " "to evaluate, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." 
+"different hyperparameters on different clients (via the ``config`` dict)." msgstr "" -#: ../../source/how-to-implement-strategies.rst:291 -msgid "The :code:`aggregate_evaluate` method" +#: ../../source/how-to-implement-strategies.rst:325 +msgid "The ``aggregate_evaluate`` method" msgstr "" -#: ../../source/how-to-implement-strategies.rst:293 +#: ../../source/how-to-implement-strategies.rst:327 msgid "" -":code:`aggregate_evaluate` is responsible for aggregating the results " +"``aggregate_evaluate`` is responsible for aggregating the results " "returned by the clients that were selected and asked to evaluate in " -":code:`configure_evaluate`." +"``configure_evaluate``." msgstr "" -#: ../../source/how-to-implement-strategies.rst:306 +#: ../../source/how-to-implement-strategies.rst:341 msgid "" "Of course, failures can happen, so there is no guarantee that the server " "will get results from all the clients it sent instructions to (via " -":code:`configure_evaluate`). :code:`aggregate_evaluate` therefore " -"receives a list of :code:`results`, but also a list of :code:`failures`." +"``configure_evaluate``). ``aggregate_evaluate`` therefore receives a list" +" of ``results``, but also a list of ``failures``." msgstr "" -#: ../../source/how-to-implement-strategies.rst:308 +#: ../../source/how-to-implement-strategies.rst:346 msgid "" -":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a" -" dictionary of aggregated metrics. The :code:`float` return value is " -"optional because :code:`aggregate_evaluate` might decide that the results" -" provided are not sufficient for aggregation (e.g., too many failures)." +"``aggregate_evaluate`` returns an optional ``float`` (loss) and a " +"dictionary of aggregated metrics. The ``float`` return value is optional " +"because ``aggregate_evaluate`` might decide that the results provided are" +" not sufficient for aggregation (e.g., too many failures)." 
msgstr "" -#: ../../source/how-to-implement-strategies.rst:311 -msgid "The :code:`evaluate` method" +#: ../../source/how-to-implement-strategies.rst:352 +msgid "The ``evaluate`` method" msgstr "" -#: ../../source/how-to-implement-strategies.rst:313 +#: ../../source/how-to-implement-strategies.rst:354 msgid "" -":code:`evaluate` is responsible for evaluating model parameters on the " -"server-side. Having :code:`evaluate` in addition to " -":code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies " -"to perform both servers-side and client-side (federated) evaluation." +"``evaluate`` is responsible for evaluating model parameters on the " +"server-side. Having ``evaluate`` in addition to " +"``configure_evaluate``/``aggregate_evaluate`` enables strategies to " +"perform both servers-side and client-side (federated) evaluation." msgstr "" -#: ../../source/how-to-implement-strategies.rst:323 +#: ../../source/how-to-implement-strategies.rst:364 msgid "" "The return value is again optional because the strategy might not need to" " implement server-side evaluation or because the user-defined " -":code:`evaluate` method might not complete successfully (e.g., it might " -"fail to load the server-side evaluation data)." +"``evaluate`` method might not complete successfully (e.g., it might fail " +"to load the server-side evaluation data)." 
msgstr "" #: ../../source/how-to-install-flower.rst:2 msgid "Install Flower" msgstr "" -#: ../../source/how-to-install-flower.rst:6 +#: ../../source/how-to-install-flower.rst:5 msgid "Python version" msgstr "" -#: ../../source/how-to-install-flower.rst:12 +#: ../../source/how-to-install-flower.rst:11 msgid "Install stable release" msgstr "" -#: ../../source/how-to-install-flower.rst:15 -#: ../../source/how-to-upgrade-to-flower-next.rst:46 +#: ../../source/how-to-install-flower.rst:14 +#: ../../source/how-to-upgrade-to-flower-next.rst:66 msgid "Using pip" msgstr "" -#: ../../source/how-to-install-flower.rst:17 -msgid "" -"Stable releases are available on `PyPI " -"`_::" +#: ../../source/how-to-install-flower.rst:16 +msgid "Stable releases are available on `PyPI `_:" msgstr "" -#: ../../source/how-to-install-flower.rst:21 +#: ../../source/how-to-install-flower.rst:22 msgid "" "For simulations that use the Virtual Client Engine, ``flwr`` should be " -"installed with the ``simulation`` extra::" +"installed with the ``simulation`` extra:" msgstr "" -#: ../../source/how-to-install-flower.rst:27 +#: ../../source/how-to-install-flower.rst:30 msgid "Using conda (or mamba)" msgstr "" -#: ../../source/how-to-install-flower.rst:29 +#: ../../source/how-to-install-flower.rst:32 msgid "Flower can also be installed from the ``conda-forge`` channel." 
msgstr "" -#: ../../source/how-to-install-flower.rst:31 +#: ../../source/how-to-install-flower.rst:34 msgid "" "If you have not added ``conda-forge`` to your channels, you will first " -"need to run the following::" +"need to run the following:" msgstr "" -#: ../../source/how-to-install-flower.rst:36 +#: ../../source/how-to-install-flower.rst:42 msgid "" "Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " -"installed with ``conda``::" +"installed with ``conda``:" msgstr "" -#: ../../source/how-to-install-flower.rst:40 -msgid "or with ``mamba``::" +#: ../../source/how-to-install-flower.rst:49 +msgid "or with ``mamba``:" msgstr "" -#: ../../source/how-to-install-flower.rst:46 +#: ../../source/how-to-install-flower.rst:56 msgid "Verify installation" msgstr "" -#: ../../source/how-to-install-flower.rst:48 +#: ../../source/how-to-install-flower.rst:58 msgid "" "The following command can be used to verify if Flower was successfully " "installed. If everything worked, it should print the version of Flower to" " the command line:" msgstr "" -#: ../../source/how-to-install-flower.rst:58 +#: ../../source/how-to-install-flower.rst:68 msgid "Advanced installation options" msgstr "" -#: ../../source/how-to-install-flower.rst:61 +#: ../../source/how-to-install-flower.rst:71 msgid "Install via Docker" msgstr "" -#: ../../source/how-to-install-flower.rst:63 +#: ../../source/how-to-install-flower.rst:73 msgid ":doc:`Run Flower using Docker `" msgstr "" -#: ../../source/how-to-install-flower.rst:66 +#: ../../source/how-to-install-flower.rst:76 msgid "Install pre-release" msgstr "" -#: ../../source/how-to-install-flower.rst:68 +#: ../../source/how-to-install-flower.rst:78 msgid "" "New (possibly unstable) versions of Flower are sometimes available as " "pre-release versions (alpha, beta, release candidate) before the stable " -"release happens::" +"release happens:" msgstr "" -#: ../../source/how-to-install-flower.rst:72 +#: ../../source/how-to-install-flower.rst:85 
msgid "" "For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" -" should be installed with the ``simulation`` extra::" +" should be installed with the ``simulation`` extra:" msgstr "" -#: ../../source/how-to-install-flower.rst:77 +#: ../../source/how-to-install-flower.rst:93 msgid "Install nightly release" msgstr "" -#: ../../source/how-to-install-flower.rst:79 +#: ../../source/how-to-install-flower.rst:95 msgid "" "The latest (potentially unstable) changes in Flower are available as " -"nightly releases::" +"nightly releases:" msgstr "" -#: ../../source/how-to-install-flower.rst:83 +#: ../../source/how-to-install-flower.rst:101 msgid "" "For simulations that use the Virtual Client Engine, ``flwr-nightly`` " -"should be installed with the ``simulation`` extra::" +"should be installed with the ``simulation`` extra:" msgstr "" #: ../../source/how-to-monitor-simulation.rst:2 @@ -6174,17 +6348,17 @@ msgid "" "you make smarter decisions and speed up the execution time." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:6 +#: ../../source/how-to-monitor-simulation.rst:9 msgid "" "The specific instructions assume you are using macOS and have the " "`Homebrew `_ package manager installed." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:10 +#: ../../source/how-to-monitor-simulation.rst:13 msgid "Downloads" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:16 +#: ../../source/how-to-monitor-simulation.rst:19 msgid "" "`Prometheus `_ is used for data collection, while" " `Grafana `_ will enable you to visualize the " @@ -6192,89 +6366,89 @@ msgid "" "`_ which Flower uses under the hood." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:18 +#: ../../source/how-to-monitor-simulation.rst:23 msgid "" "Overwrite the configuration files (depending on your device, it might be " "installed on a different path)." 
msgstr "" -#: ../../source/how-to-monitor-simulation.rst:20 +#: ../../source/how-to-monitor-simulation.rst:26 msgid "If you are on an M1 Mac, it should be:" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:27 +#: ../../source/how-to-monitor-simulation.rst:33 msgid "On the previous generation Intel Mac devices, it should be:" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:34 +#: ../../source/how-to-monitor-simulation.rst:40 msgid "" "Open the respective configuration files and change them. Depending on " "your device, use one of the two following commands:" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:44 +#: ../../source/how-to-monitor-simulation.rst:51 msgid "" "and then delete all the text in the file and paste a new Prometheus " "config you see below. You may adjust the time intervals to your " "requirements:" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:59 +#: ../../source/how-to-monitor-simulation.rst:67 msgid "" "Now after you have edited the Prometheus configuration, do the same with " "the Grafana configuration files. Open those using one of the following " "commands as before:" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:69 +#: ../../source/how-to-monitor-simulation.rst:78 msgid "" "Your terminal editor should open and allow you to apply the following " "configuration as before." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:84 +#: ../../source/how-to-monitor-simulation.rst:94 msgid "" "Congratulations, you just downloaded all the necessary software needed " "for metrics tracking. Now, let’s start it." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:88 +#: ../../source/how-to-monitor-simulation.rst:98 msgid "Tracking metrics" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:90 +#: ../../source/how-to-monitor-simulation.rst:100 msgid "" "Before running your Flower simulation, you have to start the monitoring " "tools you have just installed and configured." 
msgstr "" -#: ../../source/how-to-monitor-simulation.rst:97 +#: ../../source/how-to-monitor-simulation.rst:108 msgid "" "Please include the following argument in your Python code when starting a" " simulation." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:108 +#: ../../source/how-to-monitor-simulation.rst:119 msgid "Now, you are ready to start your workload." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:110 +#: ../../source/how-to-monitor-simulation.rst:121 msgid "" "Shortly after the simulation starts, you should see the following logs in" " your terminal:" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:117 -msgid "You can look at everything at ``_ ." +#: ../../source/how-to-monitor-simulation.rst:127 +msgid "You can look at everything at http://127.0.0.1:8265 ." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:119 +#: ../../source/how-to-monitor-simulation.rst:129 msgid "" "It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" " lowest option)." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:121 +#: ../../source/how-to-monitor-simulation.rst:132 msgid "" "Or alternatively, you can just see them in Grafana by clicking on the " "right-up corner, “View in Grafana”. Please note that the Ray dashboard is" @@ -6283,24 +6457,24 @@ msgid "" "going to ``http://localhost:3000/``." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:123 +#: ../../source/how-to-monitor-simulation.rst:137 msgid "" "After you finish the visualization, stop Prometheus and Grafana. This is " -"important as they will otherwise block, for example port :code:`3000` on " -"your machine as long as they are running." +"important as they will otherwise block, for example port ``3000`` on your" +" machine as long as they are running." 
msgstr "" -#: ../../source/how-to-monitor-simulation.rst:132 +#: ../../source/how-to-monitor-simulation.rst:147 msgid "Resource allocation" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:134 +#: ../../source/how-to-monitor-simulation.rst:149 msgid "" "You must understand how the Ray library works to efficiently allocate " "system resources to simulation clients on your own." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:136 +#: ../../source/how-to-monitor-simulation.rst:152 msgid "" "Initially, the simulation (which Ray handles under the hood) starts by " "default with all the available resources on the system, which it shares " @@ -6310,89 +6484,89 @@ msgid "" "check the system resources by running the following:" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:143 +#: ../../source/how-to-monitor-simulation.rst:164 msgid "In Google Colab, the result you see might be similar to this:" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:155 +#: ../../source/how-to-monitor-simulation.rst:175 msgid "" "However, you can overwrite the defaults. When starting a simulation, do " "the following (you don't need to overwrite all of them):" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:175 +#: ../../source/how-to-monitor-simulation.rst:195 msgid "Let’s also specify the resource for a single client." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:205 +#: ../../source/how-to-monitor-simulation.rst:225 msgid "" "Now comes the crucial part. Ray will start a new client only when it has " "all the required resources (such that they run in parallel) when the " "resources allow." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:207 +#: ../../source/how-to-monitor-simulation.rst:228 msgid "" "In the example above, only one client will be run, so your clients won't " -"run concurrently. Setting :code:`client_num_gpus = 0.5` would allow " -"running two clients and therefore enable them to run concurrently. 
Be " -"careful not to require more resources than available. If you specified " -":code:`client_num_gpus = 2`, the simulation wouldn't start (even if you " -"had 2 GPUs but decided to set 1 in :code:`ray_init_args`)." +"run concurrently. Setting ``client_num_gpus = 0.5`` would allow running " +"two clients and therefore enable them to run concurrently. Be careful not" +" to require more resources than available. If you specified " +"``client_num_gpus = 2``, the simulation wouldn't start (even if you had 2" +" GPUs but decided to set 1 in ``ray_init_args``)." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 +#: ../../source/how-to-monitor-simulation.rst:235 ../../source/ref-faq.rst:2 msgid "FAQ" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:214 +#: ../../source/how-to-monitor-simulation.rst:237 msgid "Q: I don't see any metrics logged." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:216 +#: ../../source/how-to-monitor-simulation.rst:239 msgid "" "A: The timeframe might not be properly set. The setting is in the top " "right corner (\"Last 30 minutes\" by default). Please change the " "timeframe to reflect the period when the simulation was running." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:218 +#: ../../source/how-to-monitor-simulation.rst:243 msgid "" "Q: I see “Grafana server not detected. Please make sure the Grafana " "server is running and refresh this page” after going to the Metrics tab " "in Ray Dashboard." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:220 +#: ../../source/how-to-monitor-simulation.rst:246 msgid "" "A: You probably don't have Grafana running. Please check the running " "services" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:226 +#: ../../source/how-to-monitor-simulation.rst:252 msgid "" "Q: I see \"This site can't be reached\" when going to " -"``_." +"http://127.0.0.1:8265." 
msgstr "" -#: ../../source/how-to-monitor-simulation.rst:228 +#: ../../source/how-to-monitor-simulation.rst:254 msgid "" "A: Either the simulation has already finished, or you still need to start" " Prometheus." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:232 +#: ../../source/how-to-monitor-simulation.rst:257 msgid "Resources" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:234 +#: ../../source/how-to-monitor-simulation.rst:259 msgid "" -"Ray Dashboard: ``_" +"Ray Dashboard: https://docs.ray.io/en/latest/ray-observability/getting-" +"started.html" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:236 -msgid "Ray Metrics: ``_" +#: ../../source/how-to-monitor-simulation.rst:261 +msgid "Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html" msgstr "" #: ../../source/how-to-run-simulations.rst:2 @@ -6415,19 +6589,19 @@ msgid "" "VCE." msgstr "" -#: ../../source/how-to-run-simulations.rst:10 +#: ../../source/how-to-run-simulations.rst:19 msgid "" -"The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" -" clients. These clients are identical to `non-virtual` clients (i.e. the " +"The ``VirtualClientEngine`` schedules, launches and manages `virtual` " +"clients. These clients are identical to `non-virtual` clients (i.e. the " "ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " "creating a class inheriting, for example, from `flwr.client.NumPyClient " "`_ and therefore behave in an " "identical way. In addition to that, clients managed by the " -":code:`VirtualClientEngine` are:" +"``VirtualClientEngine`` are:" msgstr "" -#: ../../source/how-to-run-simulations.rst:12 +#: ../../source/how-to-run-simulations.rst:26 msgid "" "resource-aware: this means that each client gets assigned a portion of " "the compute and memory on your system. 
You as a user can control this at " @@ -6436,14 +6610,14 @@ msgid "" "client, the more clients can run concurrently on the same hardware." msgstr "" -#: ../../source/how-to-run-simulations.rst:13 +#: ../../source/how-to-run-simulations.rst:31 msgid "" "self-managed: this means that you as a user do not need to launch clients" -" manually, instead this gets delegated to :code:`VirtualClientEngine`'s " +" manually, instead this gets delegated to ``VirtualClientEngine``'s " "internals." msgstr "" -#: ../../source/how-to-run-simulations.rst:14 +#: ../../source/how-to-run-simulations.rst:33 msgid "" "ephemeral: this means that a client is only materialized when it is " "required in the FL process (e.g. to do `fit() `_, an open-source framework for scalable Python " -"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use " -"of `Actors `_ to " -"spawn `virtual` clients and run their workload." +"workloads. In particular, Flower's ``VirtualClientEngine`` makes use of " +"`Actors `_ to spawn " +"`virtual` clients and run their workload." msgstr "" -#: ../../source/how-to-run-simulations.rst:20 +#: ../../source/how-to-run-simulations.rst:45 msgid "Launch your Flower simulation" msgstr "" -#: ../../source/how-to-run-simulations.rst:22 +#: ../../source/how-to-run-simulations.rst:47 msgid "" "Running Flower simulations still require you to define your client class," " a strategy, and utility functions to download and load (and potentially " @@ -6475,36 +6649,36 @@ msgid "" " as follows:" msgstr "" -#: ../../source/how-to-run-simulations.rst:44 +#: ../../source/how-to-run-simulations.rst:73 msgid "VirtualClientEngine resources" msgstr "" -#: ../../source/how-to-run-simulations.rst:45 +#: ../../source/how-to-run-simulations.rst:75 msgid "" "By default the VCE has access to all system resources (i.e. all CPUs, all" " GPUs, etc) since that is also the default behavior when starting Ray. 
" "However, in some settings you might want to limit how many of your system" " resources are used for simulation. You can do this via the " -":code:`ray_init_args` input argument to :code:`start_simulation` which " -"the VCE internally passes to Ray's :code:`ray.init` command. For a " -"complete list of settings you can configure check the `ray.init " +"``ray_init_args`` input argument to ``start_simulation`` which the VCE " +"internally passes to Ray's ``ray.init`` command. For a complete list of " +"settings you can configure check the `ray.init " "`_" -" documentation. Do not set :code:`ray_init_args` if you want the VCE to " -"use all your system's CPUs and GPUs." +" documentation. Do not set ``ray_init_args`` if you want the VCE to use " +"all your system's CPUs and GPUs." msgstr "" -#: ../../source/how-to-run-simulations.rst:62 +#: ../../source/how-to-run-simulations.rst:97 msgid "Assigning client resources" msgstr "" -#: ../../source/how-to-run-simulations.rst:63 +#: ../../source/how-to-run-simulations.rst:99 msgid "" -"By default the :code:`VirtualClientEngine` assigns a single CPU core (and" -" nothing else) to each virtual client. This means that if your system has" -" 10 cores, that many virtual clients can be concurrently running." +"By default the ``VirtualClientEngine`` assigns a single CPU core (and " +"nothing else) to each virtual client. This means that if your system has " +"10 cores, that many virtual clients can be concurrently running." msgstr "" -#: ../../source/how-to-run-simulations.rst:65 +#: ../../source/how-to-run-simulations.rst:103 msgid "" "More often than not, you would probably like to adjust the resources your" " clients get assigned based on the complexity (i.e. compute and memory " @@ -6515,34 +6689,32 @@ msgid "" "our case Flower clients):" msgstr "" -#: ../../source/how-to-run-simulations.rst:67 -msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." 
+#: ../../source/how-to-run-simulations.rst:110 +msgid "``num_cpus`` indicates the number of CPU cores a client would get." msgstr "" -#: ../../source/how-to-run-simulations.rst:68 -msgid "" -":code:`num_gpus` indicates the **ratio** of GPU memory a client gets " -"assigned." +#: ../../source/how-to-run-simulations.rst:111 +msgid "``num_gpus`` indicates the **ratio** of GPU memory a client gets assigned." msgstr "" -#: ../../source/how-to-run-simulations.rst:70 +#: ../../source/how-to-run-simulations.rst:113 msgid "Let's see a few examples:" msgstr "" -#: ../../source/how-to-run-simulations.rst:89 +#: ../../source/how-to-run-simulations.rst:132 msgid "" -"While the :code:`client_resources` can be used to control the degree of " +"While the ``client_resources`` can be used to control the degree of " "concurrency in your FL simulation, this does not stop you from running " "dozens, hundreds or even thousands of clients in the same round and " "having orders of magnitude more `dormant` (i.e. not participating in a " "round) clients. Let's say you want to have 100 clients per round but your" " system can only accommodate 8 clients concurrently. The " -":code:`VirtualClientEngine` will schedule 100 jobs to run (each " -"simulating a client sampled by the strategy) and then will execute them " -"in a resource-aware manner in batches of 8." +"``VirtualClientEngine`` will schedule 100 jobs to run (each simulating a " +"client sampled by the strategy) and then will execute them in a resource-" +"aware manner in batches of 8." msgstr "" -#: ../../source/how-to-run-simulations.rst:91 +#: ../../source/how-to-run-simulations.rst:140 msgid "" "To understand all the intricate details on how resources are used to " "schedule FL clients and how to define custom resources, please take a " @@ -6550,130 +6722,128 @@ msgid "" "core/scheduling/resources.html>`_." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:94 +#: ../../source/how-to-run-simulations.rst:145 msgid "Simulation examples" msgstr "" -#: ../../source/how-to-run-simulations.rst:96 +#: ../../source/how-to-run-simulations.rst:147 msgid "" "A few ready-to-run complete examples for Flower simulation in " "Tensorflow/Keras and PyTorch are provided in the `Flower repository " "`_. You can run them on Google Colab too:" msgstr "" -#: ../../source/how-to-run-simulations.rst:98 +#: ../../source/how-to-run-simulations.rst:151 msgid "" "`Tensorflow/Keras Simulation " "`_: 100 clients collaboratively train a MLP model on MNIST." msgstr "" -#: ../../source/how-to-run-simulations.rst:99 +#: ../../source/how-to-run-simulations.rst:154 msgid "" "`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " "MNIST." msgstr "" -#: ../../source/how-to-run-simulations.rst:104 +#: ../../source/how-to-run-simulations.rst:159 msgid "Multi-node Flower simulations" msgstr "" -#: ../../source/how-to-run-simulations.rst:106 +#: ../../source/how-to-run-simulations.rst:161 msgid "" -"Flower's :code:`VirtualClientEngine` allows you to run FL simulations " -"across multiple compute nodes. Before starting your multi-node simulation" -" ensure that you:" +"Flower's ``VirtualClientEngine`` allows you to run FL simulations across " +"multiple compute nodes. Before starting your multi-node simulation ensure" +" that you:" msgstr "" -#: ../../source/how-to-run-simulations.rst:108 +#: ../../source/how-to-run-simulations.rst:164 msgid "Have the same Python environment in all nodes." msgstr "" -#: ../../source/how-to-run-simulations.rst:109 +#: ../../source/how-to-run-simulations.rst:165 msgid "Have a copy of your code (e.g. your entire repo) in all nodes." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:110 +#: ../../source/how-to-run-simulations.rst:166 msgid "" "Have a copy of your dataset in all nodes (more about this in " ":ref:`simulation considerations `)" msgstr "" -#: ../../source/how-to-run-simulations.rst:111 +#: ../../source/how-to-run-simulations.rst:168 msgid "" -"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation " -"`_ so the " -":code:`VirtualClientEngine` attaches to a running Ray instance." +"Pass ``ray_init_args={\"address\"=\"auto\"}`` to `start_simulation `_ so the " +"``VirtualClientEngine`` attaches to a running Ray instance." msgstr "" -#: ../../source/how-to-run-simulations.rst:112 +#: ../../source/how-to-run-simulations.rst:171 msgid "" -"Start Ray on you head node: on the terminal type :code:`ray start " -"--head`. This command will print a few lines, one of which indicates how " -"to attach other nodes to the head node." +"Start Ray on you head node: on the terminal type ``ray start --head``. " +"This command will print a few lines, one of which indicates how to attach" +" other nodes to the head node." msgstr "" -#: ../../source/how-to-run-simulations.rst:113 +#: ../../source/how-to-run-simulations.rst:174 msgid "" "Attach other nodes to the head node: copy the command shown after " "starting the head and execute it on terminal of a new node: for example " -":code:`ray start --address='192.168.1.132:6379'`" +"``ray start --address='192.168.1.132:6379'``" msgstr "" -#: ../../source/how-to-run-simulations.rst:115 +#: ../../source/how-to-run-simulations.rst:178 msgid "" "With all the above done, you can run your code from the head node as you " "would if the simulation was running on a single node." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:117 +#: ../../source/how-to-run-simulations.rst:181 msgid "" "Once your simulation is finished, if you'd like to dismantle your cluster" -" you simply need to run the command :code:`ray stop` in each node's " -"terminal (including the head node)." +" you simply need to run the command ``ray stop`` in each node's terminal " +"(including the head node)." msgstr "" -#: ../../source/how-to-run-simulations.rst:120 +#: ../../source/how-to-run-simulations.rst:185 msgid "Multi-node simulation good-to-know" msgstr "" -#: ../../source/how-to-run-simulations.rst:122 +#: ../../source/how-to-run-simulations.rst:187 msgid "" "Here we list a few interesting functionality when running multi-node FL " "simulations:" msgstr "" -#: ../../source/how-to-run-simulations.rst:124 +#: ../../source/how-to-run-simulations.rst:189 msgid "" -"User :code:`ray status` to check all nodes connected to your head node as" -" well as the total resources available to the " -":code:`VirtualClientEngine`." +"User ``ray status`` to check all nodes connected to your head node as " +"well as the total resources available to the ``VirtualClientEngine``." msgstr "" -#: ../../source/how-to-run-simulations.rst:126 +#: ../../source/how-to-run-simulations.rst:192 msgid "" "When attaching a new node to the head, all its resources (i.e. all CPUs, " "all GPUs) will be visible by the head node. This means that the " -":code:`VirtualClientEngine` can schedule as many `virtual` clients as " -"that node can possible run. In some settings you might want to exclude " -"certain resources from the simulation. You can do this by appending " -"`--num-cpus=` and/or `--num-" -"gpus=` in any :code:`ray start` command (including " -"when starting the head)" +"``VirtualClientEngine`` can schedule as many `virtual` clients as that " +"node can possible run. In some settings you might want to exclude certain" +" resources from the simulation. 
You can do this by appending `--num-" +"cpus=` and/or `--num-gpus=` in " +"any ``ray start`` command (including when starting the head)" msgstr "" -#: ../../source/how-to-run-simulations.rst:132 +#: ../../source/how-to-run-simulations.rst:202 msgid "Considerations for simulations" msgstr "" -#: ../../source/how-to-run-simulations.rst:135 +#: ../../source/how-to-run-simulations.rst:206 msgid "" "We are actively working on these fronts so to make it trivial to run any " "FL workload with Flower simulation." msgstr "" -#: ../../source/how-to-run-simulations.rst:138 +#: ../../source/how-to-run-simulations.rst:209 msgid "" "The current VCE allows you to run Federated Learning workloads in " "simulation mode whether you are prototyping simple scenarios on your " @@ -6684,59 +6854,58 @@ msgid "" "couple of current limitations in our implementation." msgstr "" -#: ../../source/how-to-run-simulations.rst:141 +#: ../../source/how-to-run-simulations.rst:217 msgid "GPU resources" msgstr "" -#: ../../source/how-to-run-simulations.rst:143 +#: ../../source/how-to-run-simulations.rst:219 msgid "" "The VCE assigns a share of GPU memory to a client that specifies the key " -":code:`num_gpus` in :code:`client_resources`. This being said, Ray (used " +"``num_gpus`` in ``client_resources``. This being said, Ray (used " "internally by the VCE) is by default:" msgstr "" -#: ../../source/how-to-run-simulations.rst:146 +#: ../../source/how-to-run-simulations.rst:222 msgid "" "not aware of the total VRAM available on the GPUs. This means that if you" -" set :code:`num_gpus=0.5` and you have two GPUs in your system with " -"different (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients" -" concurrently." +" set ``num_gpus=0.5`` and you have two GPUs in your system with different" +" (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients " +"concurrently." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:147 +#: ../../source/how-to-run-simulations.rst:225 msgid "" "not aware of other unrelated (i.e. not created by the VCE) workloads are " "running on the GPU. Two takeaways from this are:" msgstr "" -#: ../../source/how-to-run-simulations.rst:149 +#: ../../source/how-to-run-simulations.rst:228 msgid "" "Your Flower server might need a GPU to evaluate the `global model` after " "aggregation (by instance when making use of the `evaluate method `_)" msgstr "" -#: ../../source/how-to-run-simulations.rst:150 +#: ../../source/how-to-run-simulations.rst:231 msgid "" "If you want to run several independent Flower simulations on the same " "machine you need to mask-out your GPUs with " -":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching your " -"experiment." +"``CUDA_VISIBLE_DEVICES=\"\"`` when launching your experiment." msgstr "" -#: ../../source/how-to-run-simulations.rst:153 +#: ../../source/how-to-run-simulations.rst:235 msgid "" -"In addition, the GPU resource limits passed to :code:`client_resources` " -"are not `enforced` (i.e. they can be exceeded) which can result in the " +"In addition, the GPU resource limits passed to ``client_resources`` are " +"not `enforced` (i.e. they can be exceeded) which can result in the " "situation of client using more VRAM than the ratio specified when " "starting the simulation." msgstr "" -#: ../../source/how-to-run-simulations.rst:156 +#: ../../source/how-to-run-simulations.rst:240 msgid "TensorFlow with GPUs" msgstr "" -#: ../../source/how-to-run-simulations.rst:158 +#: ../../source/how-to-run-simulations.rst:242 msgid "" "When `using a GPU with TensorFlow " "`_ nearly your entire GPU memory of" @@ -6748,28 +6917,28 @@ msgid "" "`_." msgstr "" -#: ../../source/how-to-run-simulations.rst:160 +#: ../../source/how-to-run-simulations.rst:249 msgid "" "This would need to be done in the main process (which is where the server" " would run) and in each Actor created by the VCE. 
By means of " -":code:`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` " -"in order to specify a function to be executed upon actor initialization. " -"In this case, to enable GPU growth for TF workloads. It would look as " +"``actor_kwargs`` we can pass the reserved key `\"on_actor_init_fn\"` in " +"order to specify a function to be executed upon actor initialization. In " +"this case, to enable GPU growth for TF workloads. It would look as " "follows:" msgstr "" -#: ../../source/how-to-run-simulations.rst:179 +#: ../../source/how-to-run-simulations.rst:272 msgid "" "This is precisely the mechanism used in `Tensorflow/Keras Simulation " "`_ example." msgstr "" -#: ../../source/how-to-run-simulations.rst:183 +#: ../../source/how-to-run-simulations.rst:276 msgid "Multi-node setups" msgstr "" -#: ../../source/how-to-run-simulations.rst:185 +#: ../../source/how-to-run-simulations.rst:278 msgid "" "The VCE does not currently offer a way to control on which node a " "particular `virtual` client is executed. In other words, if more than a " @@ -6782,7 +6951,7 @@ msgid "" "circumvent data duplication." msgstr "" -#: ../../source/how-to-run-simulations.rst:187 +#: ../../source/how-to-run-simulations.rst:286 msgid "" "By definition virtual clients are `stateless` due to their ephemeral " "nature. A client state can be implemented as part of the Flower client " @@ -6811,22 +6980,21 @@ msgstr "" #: ../../source/how-to-save-and-load-model-checkpoints.rst:10 msgid "" "Model updates can be persisted on the server-side by customizing " -":code:`Strategy` methods. Implementing custom strategies is always an " -"option, but for many cases it may be more convenient to simply customize " -"an existing strategy. The following code example defines a new " -":code:`SaveModelStrategy` which customized the existing built-in " -":code:`FedAvg` strategy. 
In particular, it customizes " -":code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class " -"(:code:`FedAvg`). It then continues to save returned (aggregated) weights" -" before it returns those aggregated weights to the caller (i.e., the " -"server):" +"``Strategy`` methods. Implementing custom strategies is always an option," +" but for many cases it may be more convenient to simply customize an " +"existing strategy. The following code example defines a new " +"``SaveModelStrategy`` which customized the existing built-in ``FedAvg`` " +"strategy. In particular, it customizes ``aggregate_fit`` by calling " +"``aggregate_fit`` in the base class (``FedAvg``). It then continues to " +"save returned (aggregated) weights before it returns those aggregated " +"weights to the caller (i.e., the server):" msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:47 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:53 msgid "Save and load PyTorch checkpoints" msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:49 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:55 msgid "" "Similar to the previous example but with a few extra steps, we'll show " "how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " @@ -6836,14 +7004,14 @@ msgid "" " class structure." msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:85 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:98 msgid "" "To load your progress, you simply append the following lines to your " "code. Note that this will iterate over all saved checkpoints and load the" " latest one:" msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:97 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:111 msgid "" "Return/use this object of type ``Parameters`` wherever necessary, such as" " in the ``initial_parameters`` when defining a ``Strategy``." 
@@ -6861,129 +7029,129 @@ msgid "" " to change the code of existing 0.x-series projects." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:8 -#: ../../source/how-to-upgrade-to-flower-next.rst:43 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-upgrade-to-flower-next.rst:63 msgid "Install update" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 msgid "" "Here's how to update an existing installation to Flower 1.0 using either " "pip or Poetry:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 msgid "pip: add ``-U`` when installing." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:16 msgid "" "``python -m pip install -U flwr`` (when using ``start_server`` and " "``start_client``)" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:15 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 msgid "" "``python -m pip install -U 'flwr[simulation]'`` (when using " "``start_simulation``)" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 msgid "" "Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " "reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " "before running ``poetry install``)." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:23 msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:20 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 msgid "" "``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " "using ``start_simulation``)" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 -#: ../../source/how-to-upgrade-to-flower-next.rst:100 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:28 +#: ../../source/how-to-upgrade-to-flower-next.rst:120 msgid "Required changes" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:30 msgid "The following breaking changes require manual updates." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:29 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 msgid "General" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:31 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:35 msgid "" "Pass all arguments as keyword arguments (not as positional arguments). 
" "Here's an example:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:38 msgid "" "Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " "FlowerClient())``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:34 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 msgid "" "Flower 1.0 (keyword arguments): " "``start_client(server_address=\"127.0.0.1:8080\", " "client=FlowerClient())``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 #: ../../source/ref-api/flwr.client.Client.rst:2 msgid "Client" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 msgid "" "Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " "``def get_parameters(self, config):``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:40 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 msgid "" "Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " "get_parameters(self, ins: GetParametersIns):``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 msgid "Strategies / ``start_server`` / ``start_simulation``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:53 msgid "" "Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " "``start_simulation``. 
Here's an example:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:56 msgid "" "Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " "\"round_timeout\": 600.0}, ...)``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 msgid "" "Flower 1.0: ``start_server(..., " "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:50 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 msgid "" "Replace ``num_rounds=1`` in ``start_simulation`` with the new " "``config=ServerConfig(...)`` (see previous item)" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:63 msgid "" "Remove ``force_final_distributed_eval`` parameter from calls to " "``start_server``. Distributed evaluation on all clients can be enabled by" @@ -6991,19 +7159,19 @@ msgid "" "last round of training." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:66 msgid "Rename parameter/ndarray conversion functions:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:55 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:69 msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:57 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 msgid "" "Strategy initialization: if the strategy relies on the default values for" " ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " @@ -7013,51 +7181,51 @@ msgid "" "FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:79 msgid "``fraction_eval`` --> ``fraction_evaluate``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 msgid "``min_eval_clients`` --> ``min_evaluate_clients``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:81 msgid "``eval_fn`` --> ``evaluate_fn``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:64 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:83 msgid "" "Rename ``rnd`` to ``server_round``. This impacts multiple methods and " "functions, for example, ``configure_fit``, ``aggregate_fit``, " "``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:86 msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:88 msgid "" "Flower 0.19: ``def evaluate(parameters: NDArrays) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:90 msgid "" "Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " "config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " "Scalar]]]:``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:94 msgid "Custom strategies" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:73 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:96 msgid "" "The type of parameter ``failures`` has changed from " "``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " @@ -7066,35 +7234,35 @@ msgid "" "``aggregate_evaluate``)" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:74 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:100 msgid "" "The ``Strategy`` method ``evaluate`` now receives the current round of " "federated learning/evaluation as the first parameter:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:76 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:103 msgid "" "Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:105 msgid "" "Flower 1.0: ``def evaluate(self, server_round: int, parameters: " "Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 +#: 
../../source/how-to-upgrade-to-flower-1.0.rst:109 msgid "Optional improvements" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:82 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:111 msgid "" "Along with the necessary changes above, there are a number of potential " "improvements that just became possible:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:84 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:114 msgid "" "Remove \"placeholder\" methods from subclasses of ``Client`` or " "``NumPyClient``. If you, for example, use server-side evaluation, then " @@ -7102,19 +7270,19 @@ msgid "" "necessary." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:85 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:117 msgid "" "Configure the round timeout via ``start_simulation``: " "``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " "round_timeout=600.0), ...)``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 -#: ../../source/how-to-upgrade-to-flower-next.rst:317 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:121 +#: ../../source/how-to-upgrade-to-flower-next.rst:348 msgid "Further help" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:91 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:123 msgid "" "Most official `Flower code examples " "`_ are already updated" @@ -7136,7 +7304,7 @@ msgid "" "1.8." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:9 +#: ../../source/how-to-upgrade-to-flower-next.rst:11 msgid "" "This guide shows how to reuse pre-``1.8`` Flower code with minimum code " "changes by using the *compatibility layer* in Flower Next. In another " @@ -7144,48 +7312,48 @@ msgid "" "Next APIs." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:13 +#: ../../source/how-to-upgrade-to-flower-next.rst:15 msgid "Let's dive in!" 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:48 +#: ../../source/how-to-upgrade-to-flower-next.rst:68 msgid "" "Here's how to update an existing installation of Flower to Flower Next " "with ``pip``:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:54 +#: ../../source/how-to-upgrade-to-flower-next.rst:74 msgid "or if you need Flower Next with simulation:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:61 +#: ../../source/how-to-upgrade-to-flower-next.rst:80 msgid "" "Ensure you set the following version constraint in your " "``requirements.txt``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:71 +#: ../../source/how-to-upgrade-to-flower-next.rst:90 msgid "or ``pyproject.toml``:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:82 +#: ../../source/how-to-upgrade-to-flower-next.rst:101 msgid "Using Poetry" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:84 +#: ../../source/how-to-upgrade-to-flower-next.rst:103 msgid "" "Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " "(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " "running ``poetry install``)." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:86 +#: ../../source/how-to-upgrade-to-flower-next.rst:106 msgid "" "Ensure you set the following version constraint in your " "``pyproject.toml``:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:102 +#: ../../source/how-to-upgrade-to-flower-next.rst:122 msgid "" "In Flower Next, the *infrastructure* and *application layers* have been " "decoupled. 
Instead of starting a client in code via ``start_client()``, " @@ -7198,31 +7366,31 @@ msgid "" "way:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:109 +#: ../../source/how-to-upgrade-to-flower-next.rst:131 msgid "|clientapp_link|_" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:110 +#: ../../source/how-to-upgrade-to-flower-next.rst:133 msgid "" "Wrap your existing client with |clientapp_link|_ instead of launching it " "via |startclient_link|_. Here's an example:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:132 +#: ../../source/how-to-upgrade-to-flower-next.rst:156 msgid "|serverapp_link|_" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:133 +#: ../../source/how-to-upgrade-to-flower-next.rst:158 msgid "" "Wrap your existing strategy with |serverapp_link|_ instead of starting " "the server via |startserver_link|_. Here's an example:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:154 +#: ../../source/how-to-upgrade-to-flower-next.rst:179 msgid "Deployment" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:155 +#: ../../source/how-to-upgrade-to-flower-next.rst:181 msgid "" "Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " "in sequence, |flowernext_clientapp_link|_ (2x) and " @@ -7230,13 +7398,13 @@ msgid "" " `server.py` as Python scripts." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:158 +#: ../../source/how-to-upgrade-to-flower-next.rst:184 msgid "" "Here's an example to start the server without HTTPS (only for " "prototyping):" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:174 +#: ../../source/how-to-upgrade-to-flower-next.rst:200 msgid "" "Here's another example to start with HTTPS. Use the ``--ssl-ca-" "certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " @@ -7244,18 +7412,18 @@ msgid "" "private key)." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:201 +#: ../../source/how-to-upgrade-to-flower-next.rst:228 msgid "Simulation in CLI" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:202 +#: ../../source/how-to-upgrade-to-flower-next.rst:230 msgid "" "Wrap your existing client and strategy with |clientapp_link|_ and " "|serverapp_link|_, respectively. There is no need to use |startsim_link|_" " anymore. Here's an example:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:232 +#: ../../source/how-to-upgrade-to-flower-next.rst:263 msgid "" "Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " "``client_app`` object in the code instead of executing the Python script." @@ -7263,24 +7431,24 @@ msgid "" "objects are in a ``sim.py`` module):" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:249 +#: ../../source/how-to-upgrade-to-flower-next.rst:280 msgid "" "Set default resources for each |clientapp_link|_ using the ``--backend-" "config`` command line argument instead of setting the " "``client_resources`` argument in |startsim_link|_. Here's an example:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:275 +#: ../../source/how-to-upgrade-to-flower-next.rst:304 msgid "Simulation in a Notebook" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:276 +#: ../../source/how-to-upgrade-to-flower-next.rst:306 msgid "" "Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " "an example:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:319 +#: ../../source/how-to-upgrade-to-flower-next.rst:350 msgid "" "Some official `Flower code examples `_ " "are already updated to Flower Next so they can serve as a reference for " @@ -7291,18 +7459,18 @@ msgid "" " or share and learn from others about migrating to Flower Next." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:325 +#: ../../source/how-to-upgrade-to-flower-next.rst:357 msgid "Important" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:328 +#: ../../source/how-to-upgrade-to-flower-next.rst:359 msgid "" "As we continuously enhance Flower Next at a rapid pace, we'll be " "periodically updating this guide. Please feel free to share any feedback " "with us!" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:334 +#: ../../source/how-to-upgrade-to-flower-next.rst:365 msgid "Happy migrating! 🚀" msgstr "" @@ -7316,7 +7484,7 @@ msgid "" " interfaces may change in future versions.**" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:6 +#: ../../source/how-to-use-built-in-mods.rst:7 msgid "" "In this tutorial, we will learn how to utilize built-in mods to augment " "the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " @@ -7324,104 +7492,104 @@ msgid "" "the ``ClientApp``." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:9 +#: ../../source/how-to-use-built-in-mods.rst:12 msgid "What are Mods?" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:11 +#: ../../source/how-to-use-built-in-mods.rst:14 msgid "" "A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " "or inspect the incoming ``Message`` and the resulting outgoing " "``Message``. 
The signature for a ``Mod`` is as follows:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:18 +#: ../../source/how-to-use-built-in-mods.rst:23 msgid "A typical mod function might look something like this:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:31 +#: ../../source/how-to-use-built-in-mods.rst:36 msgid "Using Mods" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:33 +#: ../../source/how-to-use-built-in-mods.rst:38 msgid "To use mods in your ``ClientApp``, you can follow these steps:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:36 +#: ../../source/how-to-use-built-in-mods.rst:41 msgid "1. Import the required mods" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:38 +#: ../../source/how-to-use-built-in-mods.rst:43 msgid "First, import the built-in mod you intend to use:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:46 +#: ../../source/how-to-use-built-in-mods.rst:51 msgid "2. Define your client function" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:48 +#: ../../source/how-to-use-built-in-mods.rst:53 msgid "" "Define your client function (``client_fn``) that will be wrapped by the " "mod(s):" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:57 +#: ../../source/how-to-use-built-in-mods.rst:62 msgid "3. Create the ``ClientApp`` with mods" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:59 +#: ../../source/how-to-use-built-in-mods.rst:64 msgid "" "Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " "argument. 
The order in which you provide the mods matters:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:72 +#: ../../source/how-to-use-built-in-mods.rst:78 msgid "Order of execution" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:74 +#: ../../source/how-to-use-built-in-mods.rst:80 msgid "" "When the ``ClientApp`` runs, the mods are executed in the order they are " "provided in the list:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:76 +#: ../../source/how-to-use-built-in-mods.rst:83 msgid "``example_mod_1`` (outermost mod)" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:77 +#: ../../source/how-to-use-built-in-mods.rst:84 msgid "``example_mod_2`` (next mod)" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:78 +#: ../../source/how-to-use-built-in-mods.rst:85 msgid "" "Message handler (core function that handles the incoming ``Message`` and " "returns the outgoing ``Message``)" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:79 +#: ../../source/how-to-use-built-in-mods.rst:87 msgid "``example_mod_2`` (on the way back)" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:80 +#: ../../source/how-to-use-built-in-mods.rst:88 msgid "``example_mod_1`` (outermost mod on the way back)" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:82 +#: ../../source/how-to-use-built-in-mods.rst:90 msgid "" "Each mod has a chance to inspect and modify the incoming ``Message`` " "before passing it to the next mod, and likewise with the outgoing " "``Message`` before returning it up the stack." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:87 +#: ../../source/how-to-use-built-in-mods.rst:97 msgid "" "By following this guide, you have learned how to effectively use mods to " "enhance your ``ClientApp``'s functionality. Remember that the order of " "mods is crucial and affects how the input and output are processed." 
msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:89 +#: ../../source/how-to-use-built-in-mods.rst:101 msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" msgstr "" @@ -7429,14 +7597,14 @@ msgstr "" msgid "Use Differential Privacy" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:3 +#: ../../source/how-to-use-differential-privacy.rst:4 msgid "" "This guide explains how you can utilize differential privacy in the " "Flower framework. If you are not yet familiar with differential privacy, " "you can refer to :doc:`explanation-differential-privacy`." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:7 +#: ../../source/how-to-use-differential-privacy.rst:10 msgid "" "Differential Privacy in Flower is in a preview phase. If you plan to use " "these features in a production environment with sensitive data, feel free" @@ -7444,7 +7612,7 @@ msgid "" "to best use these features." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:12 +#: ../../source/how-to-use-differential-privacy.rst:17 msgid "" "This approach consists of two separate phases: clipping of the updates " "and adding noise to the aggregated model. For the clipping phase, Flower " @@ -7452,7 +7620,7 @@ msgid "" "the server side or the client side." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:15 +#: ../../source/how-to-use-differential-privacy.rst:21 msgid "" "**Server-side Clipping**: This approach has the advantage of the server " "enforcing uniform clipping across all clients' updates and reducing the " @@ -7461,7 +7629,7 @@ msgid "" "the need to perform the clipping operation for all clients." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:16 +#: ../../source/how-to-use-differential-privacy.rst:26 msgid "" "**Client-side Clipping**: This approach has the advantage of reducing the" " computational overhead on the server. 
However, it also has the " @@ -7469,69 +7637,68 @@ msgid "" "control over the clipping process." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:21 +#: ../../source/how-to-use-differential-privacy.rst:31 msgid "Server-side Clipping" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:22 +#: ../../source/how-to-use-differential-privacy.rst:33 msgid "" -"For central DP with server-side clipping, there are two :code:`Strategy` " -"classes that act as wrappers around the actual :code:`Strategy` instance " -"(for example, :code:`FedAvg`). The two wrapper classes are " -":code:`DifferentialPrivacyServerSideFixedClipping` and " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " -"adaptive clipping." +"For central DP with server-side clipping, there are two ``Strategy`` " +"classes that act as wrappers around the actual ``Strategy`` instance (for" +" example, ``FedAvg``). The two wrapper classes are " +"``DifferentialPrivacyServerSideFixedClipping`` and " +"``DifferentialPrivacyServerSideAdaptiveClipping`` for fixed and adaptive " +"clipping." msgstr "" #: ../../source/how-to-use-differential-privacy.rst:-1 msgid "server side clipping" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:31 +#: ../../source/how-to-use-differential-privacy.rst:43 msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use server-" -"side fixed clipping using the " -":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " -"same approach can be used with " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " +"The code sample below enables the ``FedAvg`` strategy to use server-side " +"fixed clipping using the ``DifferentialPrivacyServerSideFixedClipping`` " +"wrapper class. The same approach can be used with " +"``DifferentialPrivacyServerSideAdaptiveClipping`` by adjusting the " "corresponding input parameters." 
msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:52 +#: ../../source/how-to-use-differential-privacy.rst:64 msgid "Client-side Clipping" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:53 +#: ../../source/how-to-use-differential-privacy.rst:66 msgid "" "For central DP with client-side clipping, the server sends the clipping " "value to selected clients on each round. Clients can use existing Flower " -":code:`Mods` to perform the clipping. Two mods are available for fixed " -"and adaptive client-side clipping: :code:`fixedclipping_mod` and " -":code:`adaptiveclipping_mod` with corresponding server-side wrappers " -":code:`DifferentialPrivacyClientSideFixedClipping` and " -":code:`DifferentialPrivacyClientSideAdaptiveClipping`." +"``Mods`` to perform the clipping. Two mods are available for fixed and " +"adaptive client-side clipping: ``fixedclipping_mod`` and " +"``adaptiveclipping_mod`` with corresponding server-side wrappers " +"``DifferentialPrivacyClientSideFixedClipping`` and " +"``DifferentialPrivacyClientSideAdaptiveClipping``." 
msgstr "" #: ../../source/how-to-use-differential-privacy.rst:-1 msgid "client side clipping" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:63 +#: ../../source/how-to-use-differential-privacy.rst:78 msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use " -"differential privacy with client-side fixed clipping using both the " -":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " -"the client, :code:`fixedclipping_mod`:" +"The code sample below enables the ``FedAvg`` strategy to use differential" +" privacy with client-side fixed clipping using both the " +"``DifferentialPrivacyClientSideFixedClipping`` wrapper class and, on the " +"client, ``fixedclipping_mod``:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:80 +#: ../../source/how-to-use-differential-privacy.rst:97 msgid "" -"In addition to the server-side strategy wrapper, the :code:`ClientApp` " -"needs to configure the matching :code:`fixedclipping_mod` to perform the " -"client-side clipping:" +"In addition to the server-side strategy wrapper, the ``ClientApp`` needs " +"to configure the matching ``fixedclipping_mod`` to perform the client-" +"side clipping:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:97 +#: ../../source/how-to-use-differential-privacy.rst:115 msgid "" "To utilize local differential privacy (DP) and add noise to the client " "model parameters before transmitting them to the server in Flower, you " @@ -7543,11 +7710,11 @@ msgstr "" msgid "local DP mod" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:104 -msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +#: ../../source/how-to-use-differential-privacy.rst:125 +msgid "Below is a code example that shows how to use ``LocalDpMod``:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:122 +#: ../../source/how-to-use-differential-privacy.rst:140 msgid "" "Please note that the order of mods, especially 
those that modify " "parameters, is important when using multiple modifiers. Typically, " @@ -7555,11 +7722,11 @@ msgid "" "parameters." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:125 +#: ../../source/how-to-use-differential-privacy.rst:145 msgid "Local Training using Privacy Engines" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:126 +#: ../../source/how-to-use-differential-privacy.rst:147 msgid "" "For ensuring data instance-level privacy during local model training on " "the client side, consider leveraging privacy engines such as Opacus and " @@ -7577,96 +7744,94 @@ msgstr "" #: ../../source/how-to-use-strategies.rst:4 msgid "" "Flower allows full customization of the learning process through the " -":code:`Strategy` abstraction. A number of built-in strategies are " -"provided in the core framework." +"``Strategy`` abstraction. A number of built-in strategies are provided in" +" the core framework." msgstr "" -#: ../../source/how-to-use-strategies.rst:6 +#: ../../source/how-to-use-strategies.rst:7 msgid "" "There are three ways to customize the way Flower orchestrates the " "learning process on the server side:" msgstr "" -#: ../../source/how-to-use-strategies.rst:8 -msgid "Use an existing strategy, for example, :code:`FedAvg`" +#: ../../source/how-to-use-strategies.rst:10 +msgid "Use an existing strategy, for example, ``FedAvg``" msgstr "" -#: ../../source/how-to-use-strategies.rst:9 -#: ../../source/how-to-use-strategies.rst:40 +#: ../../source/how-to-use-strategies.rst:11 +#: ../../source/how-to-use-strategies.rst:43 msgid "Customize an existing strategy with callback functions" msgstr "" -#: ../../source/how-to-use-strategies.rst:10 -#: ../../source/how-to-use-strategies.rst:87 +#: ../../source/how-to-use-strategies.rst:12 +#: ../../source/how-to-use-strategies.rst:99 msgid "Implement a novel strategy" msgstr "" -#: ../../source/how-to-use-strategies.rst:14 +#: ../../source/how-to-use-strategies.rst:15 msgid "Use an 
existing strategy" msgstr "" -#: ../../source/how-to-use-strategies.rst:16 +#: ../../source/how-to-use-strategies.rst:17 msgid "" "Flower comes with a number of popular federated learning strategies " "built-in. A built-in strategy can be instantiated as follows:" msgstr "" -#: ../../source/how-to-use-strategies.rst:25 +#: ../../source/how-to-use-strategies.rst:27 msgid "" "This creates a strategy with all parameters left at their default values " -"and passes it to the :code:`start_server` function. It is usually " -"recommended to adjust a few parameters during instantiation:" +"and passes it to the ``start_server`` function. It is usually recommended" +" to adjust a few parameters during instantiation:" msgstr "" -#: ../../source/how-to-use-strategies.rst:42 +#: ../../source/how-to-use-strategies.rst:45 msgid "" "Existing strategies provide several ways to customize their behaviour. " "Callback functions allow strategies to call user-provided code during " "execution." msgstr "" -#: ../../source/how-to-use-strategies.rst:45 +#: ../../source/how-to-use-strategies.rst:49 msgid "Configuring client fit and client evaluate" msgstr "" -#: ../../source/how-to-use-strategies.rst:47 +#: ../../source/how-to-use-strategies.rst:51 msgid "" "The server can pass new configuration values to the client each round by " -"providing a function to :code:`on_fit_config_fn`. The provided function " -"will be called by the strategy and must return a dictionary of " -"configuration key values pairs that will be sent to the client. It must " -"return a dictionary of arbitrary configuration values :code:`client.fit`" -" and :code:`client.evaluate` functions during each round of federated " -"learning." +"providing a function to ``on_fit_config_fn``. The provided function will " +"be called by the strategy and must return a dictionary of configuration " +"key values pairs that will be sent to the client. 
It must return a " +"dictionary of arbitrary configuration values ``client.fit`` and " +"``client.evaluate`` functions during each round of federated learning." msgstr "" -#: ../../source/how-to-use-strategies.rst:75 +#: ../../source/how-to-use-strategies.rst:84 msgid "" -"The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " +"The ``on_fit_config_fn`` can be used to pass arbitrary configuration " "values from server to client, and potentially change these values each " "round, for example, to adjust the learning rate. The client will receive " -"the dictionary returned by the :code:`on_fit_config_fn` in its own " -":code:`client.fit()` function." +"the dictionary returned by the ``on_fit_config_fn`` in its own " +"``client.fit()`` function." msgstr "" -#: ../../source/how-to-use-strategies.rst:78 +#: ../../source/how-to-use-strategies.rst:89 msgid "" -"Similar to :code:`on_fit_config_fn`, there is also " -":code:`on_evaluate_config_fn` to customize the configuration sent to " -":code:`client.evaluate()`" +"Similar to ``on_fit_config_fn``, there is also ``on_evaluate_config_fn`` " +"to customize the configuration sent to ``client.evaluate()``" msgstr "" -#: ../../source/how-to-use-strategies.rst:81 +#: ../../source/how-to-use-strategies.rst:93 msgid "Configuring server-side evaluation" msgstr "" -#: ../../source/how-to-use-strategies.rst:83 +#: ../../source/how-to-use-strategies.rst:95 msgid "" "Server-side evaluation can be enabled by passing an evaluation function " -"to :code:`evaluate_fn`." +"to ``evaluate_fn``." msgstr "" -#: ../../source/how-to-use-strategies.rst:89 +#: ../../source/how-to-use-strategies.rst:101 msgid "" "Writing a fully custom strategy is a bit more involved, but it provides " "the most flexibility. 
Read the `Implementing Strategies ` | :doc:`iOS `" msgstr "" -#: ../../source/index.rst:64 +#: ../../source/index.rst:70 msgid "We also made video tutorials for PyTorch:" msgstr "" -#: ../../source/index.rst:69 +#: ../../source/index.rst:75 msgid "And TensorFlow:" msgstr "" -#: ../../source/index.rst:77 +#: ../../source/index.rst:83 msgid "" "Problem-oriented how-to guides show step-by-step how to achieve a " "specific goal." msgstr "" -#: ../../source/index.rst:110 +#: ../../source/index.rst:116 msgid "" "Understanding-oriented concept guides explain and discuss key topics and " "underlying ideas behind Flower and collaborative AI." msgstr "" -#: ../../source/index.rst:121 +#: ../../source/index.rst:128 msgid "References" msgstr "" -#: ../../source/index.rst:123 +#: ../../source/index.rst:130 msgid "Information-oriented API reference and other reference material." msgstr "" -#: ../../source/index.rst:132::1 +#: ../../source/index.rst:139::1 msgid ":py:obj:`flwr `\\" msgstr "" -#: ../../source/index.rst:132::1 flwr:1 of +#: ../../source/index.rst:139::1 flwr:1 of msgid "Flower main package." msgstr "" -#: ../../source/index.rst:149 +#: ../../source/index.rst:155 msgid "Contributor docs" msgstr "" -#: ../../source/index.rst:151 +#: ../../source/index.rst:157 msgid "" "The Flower community welcomes contributions. The following docs are " "intended to help along the way." @@ -7927,6 +8092,10 @@ msgstr "Argumento de compilação" msgid "Optional argument" msgstr "Argumento de compilação" +#: ../../flwr install:1 +msgid "The source FAB file to install." +msgstr "" + #: ../../flwr log:1 msgid "Get logs from a Flower project run." 
msgstr "" @@ -7935,7 +8104,7 @@ msgstr "" msgid "Flag to stream or print logs from the Flower run" msgstr "" -#: ../../flwr log +#: ../../flwr log run msgid "default" msgstr "" @@ -7948,6 +8117,18 @@ msgstr "" msgid "Required argument" msgstr "Argumento de compilação" +#: ../../flwr log:1 +msgid "The Flower run ID to query" +msgstr "" + +#: ../../flwr log:1 +msgid "Path of the Flower project to run" +msgstr "" + +#: ../../flwr log:1 +msgid "Name of the federation to run the app on" +msgstr "" + #: ../../flwr new:1 msgid "Create new Flower App." msgstr "" @@ -7970,6 +8151,11 @@ msgstr "" msgid "The Flower username of the author" msgstr "" +#: ../../flwr new:1 +#, fuzzy +msgid "The name of the Flower App" +msgstr "O nome do repositório da imagem base." + #: ../../flwr run:1 msgid "Run Flower App." msgstr "" @@ -7990,6 +8176,26 @@ msgid "" " the `pyproject.toml` in order to be properly overriden." msgstr "" +#: ../../flwr run:1 +msgid "" +"Use `--stream` with `flwr run` to display logs; logs are not streamed by " +"default." +msgstr "" + +#: ../../flwr run:1 +#, fuzzy +msgid "``False``" +msgstr "``FLWR_VERSION``" + +#: ../../flwr run:1 +#, fuzzy +msgid "Path of the Flower App to run." +msgstr "O nome do repositório da imagem base." + +#: ../../flwr run:1 +msgid "Name of the federation to run the app on." +msgstr "" + #: ../../source/ref-api-cli.rst:16 msgid "flower-simulation" msgstr "" @@ -8006,17 +8212,16 @@ msgstr "" msgid "flower-server-app" msgstr "" -#: ../../source/ref-api-cli.rst:49 +#: ../../source/ref-api-cli.rst:50 msgid "" -"Note that since version :code:`1.11.0`, :code:`flower-server-app` no " -"longer supports passing a reference to a `ServerApp` attribute. Instead, " -"you need to pass the path to Flower app via the argument :code:`--app`. " -"This is the path to a directory containing a `pyproject.toml`. You can " -"create a valid Flower app by executing :code:`flwr new` and following the" -" prompt." 
+"Note that since version ``1.11.0``, ``flower-server-app`` no longer " +"supports passing a reference to a `ServerApp` attribute. Instead, you " +"need to pass the path to Flower app via the argument ``--app``. This is " +"the path to a directory containing a `pyproject.toml`. You can create a " +"valid Flower app by executing ``flwr new`` and following the prompt." msgstr "" -#: ../../source/ref-api-cli.rst:62 +#: ../../source/ref-api-cli.rst:64 msgid "flower-superexec" msgstr "" @@ -20338,10 +20543,12 @@ msgid "" "`_." msgstr "" -#: ../../source/ref-example-projects.rst:10 -msgid "" -"The following examples are available as standalone projects. Quickstart " -"TensorFlow/Keras ---------------------------" +#: ../../source/ref-example-projects.rst:9 +msgid "The following examples are available as standalone projects." +msgstr "" + +#: ../../source/ref-example-projects.rst:12 +msgid "Quickstart TensorFlow/Keras" msgstr "" #: ../../source/ref-example-projects.rst:14 @@ -20357,77 +20564,77 @@ msgid "" "tensorflow>`_" msgstr "" -#: ../../source/ref-example-projects.rst:18 +#: ../../source/ref-example-projects.rst:19 msgid ":doc:`Quickstart TensorFlow (Tutorial) `" msgstr "" -#: ../../source/ref-example-projects.rst:19 +#: ../../source/ref-example-projects.rst:20 msgid "" "`Quickstart TensorFlow (Blog Post) `_" msgstr "" -#: ../../source/ref-example-projects.rst:23 -#: ../../source/tutorial-quickstart-pytorch.rst:5 +#: ../../source/ref-example-projects.rst:24 +#: ../../source/tutorial-quickstart-pytorch.rst:4 msgid "Quickstart PyTorch" msgstr "" -#: ../../source/ref-example-projects.rst:25 +#: ../../source/ref-example-projects.rst:26 msgid "" "The PyTorch quickstart example shows CIFAR-10 image classification with a" " simple Convolutional Neural Network:" msgstr "" -#: ../../source/ref-example-projects.rst:28 +#: ../../source/ref-example-projects.rst:29 msgid "" "`Quickstart PyTorch (Code) " "`_" msgstr "" -#: ../../source/ref-example-projects.rst:29 +#: 
../../source/ref-example-projects.rst:31 msgid ":doc:`Quickstart PyTorch (Tutorial) `" msgstr "" -#: ../../source/ref-example-projects.rst:33 +#: ../../source/ref-example-projects.rst:34 msgid "PyTorch: From Centralized To Federated" msgstr "" -#: ../../source/ref-example-projects.rst:35 +#: ../../source/ref-example-projects.rst:36 msgid "" "This example shows how a regular PyTorch project can be federated using " "Flower:" msgstr "" -#: ../../source/ref-example-projects.rst:37 +#: ../../source/ref-example-projects.rst:38 msgid "" "`PyTorch: From Centralized To Federated (Code) " "`_" msgstr "" -#: ../../source/ref-example-projects.rst:38 +#: ../../source/ref-example-projects.rst:40 msgid "" ":doc:`PyTorch: From Centralized To Federated (Tutorial) `" msgstr "" -#: ../../source/ref-example-projects.rst:42 +#: ../../source/ref-example-projects.rst:44 msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" msgstr "" -#: ../../source/ref-example-projects.rst:44 +#: ../../source/ref-example-projects.rst:46 msgid "" "This example shows how Flower can be used to build a federated learning " "system that run across Raspberry Pi and Nvidia Jetson:" msgstr "" -#: ../../source/ref-example-projects.rst:46 +#: ../../source/ref-example-projects.rst:49 msgid "" "`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " "`_" msgstr "" -#: ../../source/ref-example-projects.rst:47 +#: ../../source/ref-example-projects.rst:51 msgid "" "`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " "`_" @@ -20443,20 +20650,20 @@ msgstr "" msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" msgstr "" -#: ../../source/ref-faq.rst:8 +#: ../../source/ref-faq.rst:9 msgid "" "Yes, it can! Flower even comes with a few under-the-hood optimizations to" " make it work even better on Colab. 
Here's a quickstart example:" msgstr "" -#: ../../source/ref-faq.rst:10 +#: ../../source/ref-faq.rst:11 msgid "" "`Flower simulation PyTorch " "`_" msgstr "" -#: ../../source/ref-faq.rst:11 +#: ../../source/ref-faq.rst:12 msgid "" "`Flower simulation TensorFlow/Keras " "`_" @@ -20479,20 +20686,20 @@ msgstr "" msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" msgstr "" -#: ../../source/ref-faq.rst:19 +#: ../../source/ref-faq.rst:20 msgid "" "Yes, it does. Please take a look at our `blog post " "`_ or check out the code examples:" msgstr "" -#: ../../source/ref-faq.rst:21 +#: ../../source/ref-faq.rst:22 msgid "" "`Android Kotlin example `_" msgstr "" -#: ../../source/ref-faq.rst:22 +#: ../../source/ref-faq.rst:23 msgid "`Android Java example `_" msgstr "" @@ -20500,50 +20707,50 @@ msgstr "" msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" msgstr "" -#: ../../source/ref-faq.rst:26 +#: ../../source/ref-faq.rst:27 msgid "" "Yes, of course. A list of available examples using Flower within a " "blockchain environment is available here:" msgstr "" -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-faq.rst:30 msgid "`FLock: A Decentralised AI Training Platform `_." msgstr "" -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-faq.rst:30 msgid "Contribute to on-chain training the model and earn rewards." msgstr "" -#: ../../source/ref-faq.rst:30 +#: ../../source/ref-faq.rst:31 msgid "Local blockchain with federated learning simulation." msgstr "" -#: ../../source/ref-faq.rst:31 +#: ../../source/ref-faq.rst:32 msgid "" "`Flower meets Nevermined GitHub Repository `_." msgstr "" -#: ../../source/ref-faq.rst:32 +#: ../../source/ref-faq.rst:33 msgid "" "`Flower meets Nevermined YouTube video " "`_." msgstr "" -#: ../../source/ref-faq.rst:33 +#: ../../source/ref-faq.rst:34 msgid "" "`Flower meets KOSMoS `_." 
msgstr "" -#: ../../source/ref-faq.rst:34 +#: ../../source/ref-faq.rst:35 msgid "" "`Flower meets Talan blog post `_ ." msgstr "" -#: ../../source/ref-faq.rst:35 +#: ../../source/ref-faq.rst:36 msgid "" "`Flower meets Talan GitHub Repository " "`_ ." @@ -20749,28 +20956,28 @@ msgid "" "app using Flower." msgstr "" -#: ../../source/tutorial-quickstart-android.rst:5 +#: ../../source/tutorial-quickstart-android.rst:4 msgid "Quickstart Android" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:10 +#: ../../source/tutorial-quickstart-android.rst:9 msgid "" "Let's build a federated learning system using TFLite and Flower on " "Android!" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:12 +#: ../../source/tutorial-quickstart-android.rst:11 msgid "" "Please refer to the `full code example " "`_ to learn " "more." msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:5 +#: ../../source/tutorial-quickstart-fastai.rst:4 msgid "Quickstart fastai" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:7 +#: ../../source/tutorial-quickstart-fastai.rst:6 msgid "" "In this federated learning tutorial we will learn how to train a " "SqueezeNet model on MNIST using Flower and fastai. It is recommended to " @@ -20778,23 +20985,23 @@ msgid "" " `." 
msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:12 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +#: ../../source/tutorial-quickstart-fastai.rst:10 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:11 msgid "Then, clone the code example directly from GitHub:" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:20 +#: ../../source/tutorial-quickstart-fastai.rst:18 msgid "" "This will create a new directory called `quickstart-fastai` containing " "the following files:" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:33 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:33 +#: ../../source/tutorial-quickstart-fastai.rst:31 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:32 msgid "Next, activate your environment, then run:" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:43 +#: ../../source/tutorial-quickstart-fastai.rst:41 msgid "" "This example by default runs the Flower Simulation Engine, creating a " "federation of 10 nodes using `FedAvg `_ of this tutorial in ``examples/quickstart-fasai`` " @@ -20837,11 +21044,11 @@ msgid "" "with 🤗 HuggingFace Transformers in order to fine-tune an LLM." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:5 +#: ../../source/tutorial-quickstart-huggingface.rst:4 msgid "Quickstart 🤗 Transformers" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:7 +#: ../../source/tutorial-quickstart-huggingface.rst:6 msgid "" "In this federated learning tutorial we will learn how to train a large " "language model (LLM) on the `IMDB " @@ -20851,7 +21058,7 @@ msgid "" "`." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:14 +#: ../../source/tutorial-quickstart-huggingface.rst:12 msgid "" "Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " "project. It will generate all the files needed to run, by default with " @@ -20860,73 +21067,73 @@ msgid "" "|iidpartitioner|_." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:20 -#: ../../source/tutorial-quickstart-mlx.rst:19 -#: ../../source/tutorial-quickstart-pytorch.rst:19 -#: ../../source/tutorial-quickstart-tensorflow.rst:20 +#: ../../source/tutorial-quickstart-huggingface.rst:17 +#: ../../source/tutorial-quickstart-mlx.rst:17 +#: ../../source/tutorial-quickstart-pytorch.rst:18 +#: ../../source/tutorial-quickstart-tensorflow.rst:18 msgid "" "Now that we have a rough idea of what this example is about, let's get " "started. First, install Flower in your new environment:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:28 +#: ../../source/tutorial-quickstart-huggingface.rst:25 msgid "" "Then, run the command below. You will be prompted to select one of the " "available templates (choose ``HuggingFace``), give a name to your " "project, and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:36 -#: ../../source/tutorial-quickstart-mlx.rst:35 -#: ../../source/tutorial-quickstart-pytorch.rst:35 -#: ../../source/tutorial-quickstart-tensorflow.rst:36 +#: ../../source/tutorial-quickstart-huggingface.rst:33 +#: ../../source/tutorial-quickstart-mlx.rst:32 +#: ../../source/tutorial-quickstart-pytorch.rst:34 +#: ../../source/tutorial-quickstart-tensorflow.rst:34 msgid "" "After running it you'll notice a new directory with your project name has" " been created. 
It should have the following structure:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:50 -#: ../../source/tutorial-quickstart-mlx.rst:49 -#: ../../source/tutorial-quickstart-pytorch.rst:49 -#: ../../source/tutorial-quickstart-tensorflow.rst:50 +#: ../../source/tutorial-quickstart-huggingface.rst:47 +#: ../../source/tutorial-quickstart-mlx.rst:46 +#: ../../source/tutorial-quickstart-pytorch.rst:48 +#: ../../source/tutorial-quickstart-tensorflow.rst:48 msgid "" "If you haven't yet installed the project and its dependencies, you can do" " so by:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:58 -#: ../../source/tutorial-quickstart-pytorch.rst:57 -#: ../../source/tutorial-quickstart-tensorflow.rst:58 +#: ../../source/tutorial-quickstart-huggingface.rst:54 +#: ../../source/tutorial-quickstart-pytorch.rst:55 +#: ../../source/tutorial-quickstart-tensorflow.rst:55 msgid "To run the project, do:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:106 +#: ../../source/tutorial-quickstart-huggingface.rst:102 msgid "You can also run the project with GPU as follows:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:113 +#: ../../source/tutorial-quickstart-huggingface.rst:109 msgid "" "This will use the default arguments where each ``ClientApp`` will use 2 " "CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:124 -#: ../../source/tutorial-quickstart-mlx.rst:114 -#: ../../source/tutorial-quickstart-pytorch.rst:113 +#: ../../source/tutorial-quickstart-huggingface.rst:120 +#: ../../source/tutorial-quickstart-mlx.rst:110 +#: ../../source/tutorial-quickstart-pytorch.rst:111 msgid "" "What follows is an explanation of each component in the project you just " "created: dataset partition, the model, defining the ``ClientApp`` and " "defining the ``ServerApp``." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:130 -#: ../../source/tutorial-quickstart-mlx.rst:120 -#: ../../source/tutorial-quickstart-pytorch.rst:119 -#: ../../source/tutorial-quickstart-tensorflow.rst:116 +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/tutorial-quickstart-tensorflow.rst:112 msgid "The Data" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:132 +#: ../../source/tutorial-quickstart-huggingface.rst:126 msgid "" "This tutorial uses |flowerdatasets|_ to easily download and partition the" " `IMDB `_ dataset. In " @@ -20939,14 +21146,14 @@ msgid "" "their data partition." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:178 -#: ../../source/tutorial-quickstart-mlx.rst:164 -#: ../../source/tutorial-quickstart-pytorch.rst:157 -#: ../../source/tutorial-quickstart-tensorflow.rst:145 +#: ../../source/tutorial-quickstart-huggingface.rst:171 +#: ../../source/tutorial-quickstart-mlx.rst:155 +#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/tutorial-quickstart-tensorflow.rst:139 msgid "The Model" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:180 +#: ../../source/tutorial-quickstart-huggingface.rst:173 msgid "" "We will leverage 🤗 Hugging Face to federate the training of language " "models over multiple clients using Flower. More specifically, we will " @@ -20956,13 +21163,13 @@ msgid "" "larger GPUs, feel free to use larger models!" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:193 +#: ../../source/tutorial-quickstart-huggingface.rst:185 msgid "" "Note that here, ``model_name`` is a string that will be loaded from the " "``Context`` in the ClientApp and ServerApp." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:196 +#: ../../source/tutorial-quickstart-huggingface.rst:188 msgid "" "In addition to loading the pretrained model weights and architecture, we " "also include two utility functions to perform both training (i.e. " @@ -20975,14 +21182,14 @@ msgid "" "perform local training or evaluation:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:239 -#: ../../source/tutorial-quickstart-mlx.rst:210 -#: ../../source/tutorial-quickstart-pytorch.rst:234 -#: ../../source/tutorial-quickstart-tensorflow.rst:176 +#: ../../source/tutorial-quickstart-huggingface.rst:228 +#: ../../source/tutorial-quickstart-mlx.rst:199 +#: ../../source/tutorial-quickstart-pytorch.rst:224 +#: ../../source/tutorial-quickstart-tensorflow.rst:168 msgid "The ClientApp" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:241 +#: ../../source/tutorial-quickstart-huggingface.rst:230 msgid "" "The main changes we have to make to use 🤗 Hugging Face with Flower will " "be found in the ``get_weights()`` and ``set_weights()`` functions. Under " @@ -20995,8 +21202,8 @@ msgid "" "them to an existing PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:254 -#: ../../source/tutorial-quickstart-pytorch.rst:245 +#: ../../source/tutorial-quickstart-huggingface.rst:241 +#: ../../source/tutorial-quickstart-pytorch.rst:234 msgid "" "The specific implementation of ``get_weights()`` and ``set_weights()`` " "depends on the type of models you use. The ones shown below work for a " @@ -21004,8 +21211,8 @@ msgid "" "have more exotic model architectures." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:269 -#: ../../source/tutorial-quickstart-pytorch.rst:261 +#: ../../source/tutorial-quickstart-huggingface.rst:257 +#: ../../source/tutorial-quickstart-pytorch.rst:250 msgid "" "The rest of the functionality is directly inspired by the centralized " "case. 
The ``fit()`` method in the client trains the model using the local" @@ -21013,7 +21220,7 @@ msgid "" "model received on a held-out validation set that the client might have:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:296 +#: ../../source/tutorial-quickstart-huggingface.rst:283 msgid "" "Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " "defined above by means of a ``client_fn()`` callback. Note that the " @@ -21024,14 +21231,14 @@ msgid "" "additional hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:330 -#: ../../source/tutorial-quickstart-mlx.rst:376 -#: ../../source/tutorial-quickstart-pytorch.rst:321 -#: ../../source/tutorial-quickstart-tensorflow.rst:245 +#: ../../source/tutorial-quickstart-huggingface.rst:316 +#: ../../source/tutorial-quickstart-mlx.rst:361 +#: ../../source/tutorial-quickstart-pytorch.rst:307 +#: ../../source/tutorial-quickstart-tensorflow.rst:232 msgid "The ServerApp" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:332 +#: ../../source/tutorial-quickstart-huggingface.rst:318 msgid "" "To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" " identical signature to that of ``client_fn()`` but the return type is " @@ -21042,13 +21249,13 @@ msgid "" "value defined in the ``pyproject.toml``." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:371 +#: ../../source/tutorial-quickstart-huggingface.rst:356 msgid "" "Congratulations! You've successfully built and run your first federated " "learning system for an LLM." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:376 +#: ../../source/tutorial-quickstart-huggingface.rst:361 msgid "" "Check the source code of the extended version of this tutorial in " "|quickstart_hf_link|_ in the Flower GitHub repository. For a " @@ -21062,11 +21269,11 @@ msgid "" "using Flower to train a neural network on MNIST." 
msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:5 +#: ../../source/tutorial-quickstart-ios.rst:4 msgid "Quickstart iOS" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:10 +#: ../../source/tutorial-quickstart-ios.rst:9 msgid "" "In this tutorial we will learn how to train a Neural Network on MNIST " "using Flower and CoreML on iOS devices." @@ -21080,13 +21287,13 @@ msgid "" "implementation in iOS, it is recommended to use Xcode as our IDE." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:15 +#: ../../source/tutorial-quickstart-ios.rst:17 msgid "" "Our example consists of one Python *server* and two iPhone *clients* that" " all have the same model." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:17 +#: ../../source/tutorial-quickstart-ios.rst:20 msgid "" "*Clients* are responsible for generating individual weight updates for " "the model based on their local datasets. These updates are then sent to " @@ -21095,24 +21302,24 @@ msgid "" "each *client*. A complete cycle of weight updates is called a *round*." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:21 +#: ../../source/tutorial-quickstart-ios.rst:26 msgid "" "Now that we have a rough idea of what is going on, let's get started to " "setup our Flower server environment. We first need to install Flower. 
You" " can do this by using pip:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:27 +#: ../../source/tutorial-quickstart-ios.rst:33 msgid "Or Poetry:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-xgboost.rst:55 +#: ../../source/tutorial-quickstart-ios.rst:40 +#: ../../source/tutorial-quickstart-scikitlearn.rst:43 +#: ../../source/tutorial-quickstart-xgboost.rst:65 msgid "Flower Client" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:36 +#: ../../source/tutorial-quickstart-ios.rst:42 msgid "" "Now that we have all our dependencies installed, let's run a simple " "distributed training using CoreML as our local training pipeline and " @@ -21121,91 +21328,90 @@ msgid "" "the Swift SDK. The client implementation can be seen below:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:72 +#: ../../source/tutorial-quickstart-ios.rst:80 msgid "" -"Let's create a new application project in Xcode and add :code:`flwr` as a" -" dependency in your project. For our application, we will store the logic" -" of our app in :code:`FLiOSModel.swift` and the UI elements in " -":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" -" in this quickstart. Please refer to the `full code example " +"Let's create a new application project in Xcode and add ``flwr`` as a " +"dependency in your project. For our application, we will store the logic " +"of our app in ``FLiOSModel.swift`` and the UI elements in " +"``ContentView.swift``. We will focus more on ``FLiOSModel.swift`` in this" +" quickstart. Please refer to the `full code example " "`_ to learn more " "about the app." 
msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:75 -msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +#: ../../source/tutorial-quickstart-ios.rst:86 +msgid "Import Flower and CoreML related packages in ``FLiOSModel.swift``:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:83 +#: ../../source/tutorial-quickstart-ios.rst:94 msgid "" "Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " "will be bundled inside the application during deployment to your iOS " "device. We need to pass the url to access mlmodel and run CoreML machine " "learning processes, it can be retrieved by calling the function " -":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " -"into :code:`MLBatchProvider` object. The preprocessing is done inside " -":code:`DataLoader.swift`." +"``Bundle.main.url``. For the MNIST dataset, we need to preprocess it into" +" ``MLBatchProvider`` object. The preprocessing is done inside " +"``DataLoader.swift``." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:99 +#: ../../source/tutorial-quickstart-ios.rst:112 msgid "" "Since CoreML does not allow the model parameters to be seen before " "training, and accessing the model parameters during or after the training" " can only be done by specifying the layer name, we need to know this " "information beforehand, through looking at the model specification, which" " are written as proto files. The implementation can be seen in " -":code:`MLModelInspect`." +"``MLModelInspect``." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:102 +#: ../../source/tutorial-quickstart-ios.rst:118 msgid "" "After we have all of the necessary information, let's create our Flower " "client." 
msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:117 +#: ../../source/tutorial-quickstart-ios.rst:133 msgid "" "Then start the Flower gRPC client and start communicating to the server " -"by passing our Flower client to the function :code:`startFlwrGRPC`." +"by passing our Flower client to the function ``startFlwrGRPC``." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:124 +#: ../../source/tutorial-quickstart-ios.rst:141 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " -"The attribute :code:`hostname` and :code:`port` tells the client which " -"server to connect to. This can be done by entering the hostname and port " -"in the application before clicking the start button to start the " -"federated learning process." +"That's it for the client. We only have to implement ``Client`` or call " +"the provided ``MLFlwrClient`` and call ``startFlwrGRPC()``. The attribute" +" ``hostname`` and ``port`` tells the client which server to connect to. " +"This can be done by entering the hostname and port in the application " +"before clicking the start button to start the federated learning process." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-scikitlearn.rst:167 -#: ../../source/tutorial-quickstart-xgboost.rst:341 +#: ../../source/tutorial-quickstart-ios.rst:148 +#: ../../source/tutorial-quickstart-scikitlearn.rst:179 +#: ../../source/tutorial-quickstart-xgboost.rst:358 msgid "Flower Server" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:131 +#: ../../source/tutorial-quickstart-ios.rst:150 msgid "" "For simple workloads we can start a Flower server and leave all the " "configuration possibilities at their default values. 
In a file named " -":code:`server.py`, import Flower and start the server:" +"``server.py``, import Flower and start the server:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:142 -#: ../../source/tutorial-quickstart-scikitlearn.rst:230 +#: ../../source/tutorial-quickstart-ios.rst:161 +#: ../../source/tutorial-quickstart-scikitlearn.rst:254 msgid "Train the model, federated!" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:144 -#: ../../source/tutorial-quickstart-xgboost.rst:567 +#: ../../source/tutorial-quickstart-ios.rst:163 +#: ../../source/tutorial-quickstart-xgboost.rst:590 msgid "" "With both client and server ready, we can now run everything and see " "federated learning in action. FL systems usually have a server and " "multiple clients. We therefore have to start the server first:" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:152 +#: ../../source/tutorial-quickstart-ios.rst:171 msgid "" "Once the server is running we can start the clients in different " "terminals. Build and run the client through your Xcode, one through Xcode" @@ -21215,12 +21421,12 @@ msgid "" "simulator-or-on-a-device>`_." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:156 +#: ../../source/tutorial-quickstart-ios.rst:177 msgid "" "Congratulations! You've successfully built and run your first federated " "learning system in your ios device. The full `source code " "`_ for this " -"example can be found in :code:`examples/ios`." +"example can be found in ``examples/ios``." msgstr "" #: ../../source/tutorial-quickstart-jax.rst:-1 @@ -21229,11 +21435,11 @@ msgid "" "with Jax to train a linear regression model on a scikit-learn dataset." 
msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:5 +#: ../../source/tutorial-quickstart-jax.rst:4 msgid "Quickstart JAX" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:10 +#: ../../source/tutorial-quickstart-jax.rst:9 msgid "" "This tutorial will show you how to use Flower to build a federated " "version of an existing JAX workload. We are using JAX to train a linear " @@ -21247,167 +21453,163 @@ msgid "" "training in a federated fashion." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:16 +#: ../../source/tutorial-quickstart-jax.rst:20 msgid "" "Before we start building our JAX example, we need install the packages " -":code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:" +"``jax``, ``jaxlib``, ``scikit-learn``, and ``flwr``:" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:24 +#: ../../source/tutorial-quickstart-jax.rst:28 msgid "Linear Regression with JAX" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:26 +#: ../../source/tutorial-quickstart-jax.rst:30 msgid "" "We begin with a brief description of the centralized training code based " -"on a :code:`Linear Regression` model. If you want a more in-depth " -"explanation of what's going on then have a look at the official `JAX " -"documentation `_." +"on a ``Linear Regression`` model. If you want a more in-depth explanation" +" of what's going on then have a look at the official `JAX documentation " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:29 +#: ../../source/tutorial-quickstart-jax.rst:34 msgid "" -"Let's create a new file called :code:`jax_training.py` with all the " +"Let's create a new file called ``jax_training.py`` with all the " "components required for a traditional (centralized) linear regression " -"training. First, the JAX packages :code:`jax` and :code:`jaxlib` need to " -"be imported. 
In addition, we need to import :code:`sklearn` since we use " -":code:`make_regression` for the dataset and :code:`train_test_split` to " -"split the dataset into a training and test set. You can see that we do " -"not yet import the :code:`flwr` package for federated learning. This will" -" be done later." +"training. First, the JAX packages ``jax`` and ``jaxlib`` need to be " +"imported. In addition, we need to import ``sklearn`` since we use " +"``make_regression`` for the dataset and ``train_test_split`` to split the" +" dataset into a training and test set. You can see that we do not yet " +"import the ``flwr`` package for federated learning. This will be done " +"later." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:43 -msgid "" -"The :code:`load_data()` function loads the mentioned training and test " -"sets." +#: ../../source/tutorial-quickstart-jax.rst:51 +msgid "The ``load_data()`` function loads the mentioned training and test sets." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:53 +#: ../../source/tutorial-quickstart-jax.rst:63 msgid "" -"The model architecture (a very simple :code:`Linear Regression` model) is" -" defined in :code:`load_model()`." +"The model architecture (a very simple ``Linear Regression`` model) is " +"defined in ``load_model()``." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:65 +#: ../../source/tutorial-quickstart-jax.rst:73 msgid "" -"We now need to define the training (function :code:`train()`), which " -"loops over the training set and measures the loss (function " -":code:`loss_fn()`) for each batch of training examples. The loss function" -" is separate since JAX takes derivatives with a :code:`grad()` function " -"(defined in the :code:`main()` function and called in :code:`train()`)." +"We now need to define the training (function ``train()``), which loops " +"over the training set and measures the loss (function ``loss_fn()``) for " +"each batch of training examples. 
The loss function is separate since JAX " +"takes derivatives with a ``grad()`` function (defined in the ``main()`` " +"function and called in ``train()``)." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:83 +#: ../../source/tutorial-quickstart-jax.rst:95 msgid "" -"The evaluation of the model is defined in the function " -":code:`evaluation()`. The function takes all test examples and measures " -"the loss of the linear regression model." +"The evaluation of the model is defined in the function ``evaluation()``. " +"The function takes all test examples and measures the loss of the linear " +"regression model." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:94 +#: ../../source/tutorial-quickstart-jax.rst:107 msgid "" "Having defined the data loading, model architecture, training, and " "evaluation we can put everything together and train our model using JAX. " -"As already mentioned, the :code:`jax.grad()` function is defined in " -":code:`main()` and passed to :code:`train()`." +"As already mentioned, the ``jax.grad()`` function is defined in " +"``main()`` and passed to ``train()``." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:111 +#: ../../source/tutorial-quickstart-jax.rst:126 msgid "You can now run your (centralized) JAX linear regression workload:" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:117 +#: ../../source/tutorial-quickstart-jax.rst:132 msgid "" "So far this should all look fairly familiar if you've used JAX before. " "Let's take the next step and use what we've built to create a simple " "federated learning system consisting of one server and two clients." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:121 +#: ../../source/tutorial-quickstart-jax.rst:137 msgid "JAX meets Flower" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:123 +#: ../../source/tutorial-quickstart-jax.rst:139 msgid "" "The concept of federating an existing workload is always the same and " "easy to understand. 
We have to start a *server* and then use the code in " -":code:`jax_training.py` for the *clients* that are connected to the " -"*server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server*, which averages all received " -"parameter updates. This describes one round of the federated learning " -"process, and we repeat this for multiple rounds." +"``jax_training.py`` for the *clients* that are connected to the *server*." +" The *server* sends model parameters to the clients. The *clients* run " +"the training and update the parameters. The updated parameters are sent " +"back to the *server*, which averages all received parameter updates. This" +" describes one round of the federated learning process, and we repeat " +"this for multiple rounds." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:145 +#: ../../source/tutorial-quickstart-jax.rst:167 msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined JAX training in :code:`jax_training.py`. Our" -" *client* needs to import :code:`flwr`, but also :code:`jax` and " -":code:`jaxlib` to update the parameters on our JAX model:" +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined JAX training in ``jax_training.py``. Our " +"*client* needs to import ``flwr``, but also ``jax`` and ``jaxlib`` to " +"update the parameters on our JAX model:" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:160 +#: ../../source/tutorial-quickstart-jax.rst:182 msgid "" "Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`FlowerClient`. 
:code:`NumPyClient` is slightly " -"easier to implement than :code:`Client` if you use a framework with good " -"NumPy interoperability (like JAX) because it avoids some of the " -"boilerplate that would otherwise be necessary. :code:`FlowerClient` needs" -" to implement four methods, two methods for getting/setting model " -"parameters, one method for training the model, and one method for testing" -" the model:" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``FlowerClient``. ``NumPyClient`` is slightly easier to implement" +" than ``Client`` if you use a framework with good NumPy interoperability " +"(like JAX) because it avoids some of the boilerplate that would otherwise" +" be necessary. ``FlowerClient`` needs to implement four methods, two " +"methods for getting/setting model parameters, one method for training the" +" model, and one method for testing the model:" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid ":code:`set_parameters (optional)`" +#: ../../source/tutorial-quickstart-jax.rst:194 +msgid "``set_parameters (optional)``" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid "transform parameters to NumPy :code:`ndarray`'s" +#: ../../source/tutorial-quickstart-jax.rst:193 +msgid "transform parameters to NumPy ``ndarray``'s" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:174 +#: ../../source/tutorial-quickstart-jax.rst:203 msgid "get the updated local model parameters and return them to the server" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:178 +#: ../../source/tutorial-quickstart-jax.rst:208 msgid "return the local loss to the server" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:180 +#: ../../source/tutorial-quickstart-jax.rst:210 msgid "" "The challenging part is to transform the JAX model parameters from " -":code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible 
with" -" `NumPyClient`." +"``DeviceArray`` to ``NumPy ndarray`` to make them compatible with " +"`NumPyClient`." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:182 +#: ../../source/tutorial-quickstart-jax.rst:213 msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`evaluate()` previously " -"defined in :code:`jax_training.py`. So what we really do here is we tell " -"Flower through our :code:`NumPyClient` subclass which of our already " -"defined functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``evaluate()`` previously defined in " +"``jax_training.py``. So what we really do here is we tell Flower through " +"our ``NumPyClient`` subclass which of our already defined functions to " +"call for training and evaluation. We included type annotations to give " +"you a better understanding of the data types that get passed around." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:251 +#: ../../source/tutorial-quickstart-jax.rst:286 msgid "Having defined the federation process, we can run it." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:280 +#: ../../source/tutorial-quickstart-jax.rst:315 msgid "" "in each window (make sure that the server is still running before you do " "so) and see your JAX project run federated learning across two clients. " "Congratulations!" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:285 +#: ../../source/tutorial-quickstart-jax.rst:321 msgid "" "The source code of this example was improved over time and can be found " "here: `Quickstart JAX `." 
msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:12 +#: ../../source/tutorial-quickstart-mlx.rst:10 msgid "" "Let's use `flwr new` to create a complete Flower+MLX project. It will " "generate all the files needed to run, by default with the Simulation " @@ -21446,24 +21648,24 @@ msgid "" "api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner>`_." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:27 +#: ../../source/tutorial-quickstart-mlx.rst:25 msgid "" "Then, run the command below. You will be prompted to select of the " "available templates (choose ``MLX``), give a name to your project, and " "type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:57 +#: ../../source/tutorial-quickstart-mlx.rst:53 msgid "To run the project do:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:106 +#: ../../source/tutorial-quickstart-mlx.rst:102 msgid "" "You can also override the parameters defined in " "``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:122 +#: ../../source/tutorial-quickstart-mlx.rst:116 msgid "" "We will use `Flower Datasets `_ to " "easily download and partition the `MNIST` dataset. In this example you'll" @@ -21474,20 +21676,20 @@ msgid "" "api/flwr_datasets.partitioner.html>`_ available in Flower Datasets:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:166 +#: ../../source/tutorial-quickstart-mlx.rst:157 msgid "" "We define the model as in the `centralized MLX example " "`_, it's a " "simple MLP:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:190 +#: ../../source/tutorial-quickstart-mlx.rst:180 msgid "" "We also define some utility functions to test our model and to iterate " "over batches." 
msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:212 +#: ../../source/tutorial-quickstart-mlx.rst:201 msgid "" "The main changes we have to make to use `MLX` with `Flower` will be found" " in the ``get_params()`` and ``set_params()`` functions. Indeed, MLX " @@ -21496,17 +21698,17 @@ msgid "" "messages to work)." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:218 +#: ../../source/tutorial-quickstart-mlx.rst:206 msgid "The way MLX stores its parameters is as follows:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:231 +#: ../../source/tutorial-quickstart-mlx.rst:219 msgid "" "Therefore, to get our list of ``np.array`` objects, we need to extract " "each array and convert them into a NumPy array:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:240 +#: ../../source/tutorial-quickstart-mlx.rst:228 msgid "" "For the ``set_params()`` function, we perform the reverse operation. We " "receive a list of NumPy arrays and want to convert them into MLX " @@ -21514,24 +21716,24 @@ msgid "" "them to the `weight` and `bias` keys of each layer dict:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:255 +#: ../../source/tutorial-quickstart-mlx.rst:243 msgid "" "The rest of the functionality is directly inspired by the centralized " "case. The ``fit()`` method in the client trains the model using the local" " dataset:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:272 +#: ../../source/tutorial-quickstart-mlx.rst:259 msgid "" "Here, after updating the parameters, we perform the training as in the " "centralized case, and return the new parameters." 
msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:275 +#: ../../source/tutorial-quickstart-mlx.rst:262 msgid "And for the ``evaluate()`` method of the client:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:285 +#: ../../source/tutorial-quickstart-mlx.rst:272 msgid "" "We also begin by updating the parameters with the ones sent by the " "server, and then we compute the loss and accuracy using the functions " @@ -21539,11 +21741,11 @@ msgid "" "the `MLP` model as well as other components such as the optimizer." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:290 +#: ../../source/tutorial-quickstart-mlx.rst:277 msgid "Putting everything together we have:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:344 +#: ../../source/tutorial-quickstart-mlx.rst:331 msgid "" "Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " "defined above by means of a ``client_fn()`` callback. Note that " @@ -21554,7 +21756,7 @@ msgid "" "method." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:378 +#: ../../source/tutorial-quickstart-mlx.rst:363 msgid "" "To construct a ``ServerApp``, we define a ``server_fn()`` callback with " "an identical signature to that of ``client_fn()``, but the return type is" @@ -21565,15 +21767,15 @@ msgid "" "``FedAvg`` strategy." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:402 -#: ../../source/tutorial-quickstart-pytorch.rst:360 -#: ../../source/tutorial-quickstart-tensorflow.rst:279 +#: ../../source/tutorial-quickstart-mlx.rst:386 +#: ../../source/tutorial-quickstart-pytorch.rst:344 +#: ../../source/tutorial-quickstart-tensorflow.rst:266 msgid "" "Congratulations! You've successfully built and run your first federated " "learning system." 
msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:407 +#: ../../source/tutorial-quickstart-mlx.rst:390 msgid "" "Check the `source code `_ of the extended version of this tutorial in ``examples" @@ -21586,15 +21788,15 @@ msgid "" "with Pandas to perform Federated Analytics." msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:5 +#: ../../source/tutorial-quickstart-pandas.rst:4 msgid "Quickstart Pandas" msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:10 +#: ../../source/tutorial-quickstart-pandas.rst:9 msgid "Let's build a federated analytics system using Pandas and Flower!" msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:12 +#: ../../source/tutorial-quickstart-pandas.rst:11 msgid "" "Please refer to the `full code example " "`_ " @@ -21607,7 +21809,7 @@ msgid "" "with PyTorch to train a CNN model on MNIST." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:7 +#: ../../source/tutorial-quickstart-pytorch.rst:6 msgid "" "In this federated learning tutorial we will learn how to train a " "Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is " @@ -21615,7 +21817,7 @@ msgid "" ":doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:12 +#: ../../source/tutorial-quickstart-pytorch.rst:11 msgid "" "Let's use `flwr new` to create a complete Flower+PyTorch project. It will" " generate all the files needed to run, by default with the Flower " @@ -21627,14 +21829,14 @@ msgid "" "api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner>`_." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:27 +#: ../../source/tutorial-quickstart-pytorch.rst:26 msgid "" "Then, run the command below. 
You will be prompted to select one of the " "available templates (choose ``PyTorch``), give a name to your project, " "and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:121 +#: ../../source/tutorial-quickstart-pytorch.rst:117 msgid "" "This tutorial uses `Flower Datasets `_ " "to easily download and partition the `CIFAR-10` dataset. In this example " @@ -21648,13 +21850,13 @@ msgid "" " that correspond to their data partition." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:159 +#: ../../source/tutorial-quickstart-pytorch.rst:152 msgid "" "We defined a simple Convolutional Neural Network (CNN), but feel free to " "replace it with a more sophisticated model if you'd like:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:184 +#: ../../source/tutorial-quickstart-pytorch.rst:177 msgid "" "In addition to defining the model architecture, we also include two " "utility functions to perform both training (i.e. ``train()``) and " @@ -21667,7 +21869,7 @@ msgid "" "training or evaluation:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:236 +#: ../../source/tutorial-quickstart-pytorch.rst:226 msgid "" "The main changes we have to make to use `PyTorch` with `Flower` will be " "found in the ``get_weights()`` and ``set_weights()`` functions. In " @@ -21677,7 +21879,7 @@ msgid "" "PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:294 +#: ../../source/tutorial-quickstart-pytorch.rst:282 msgid "" "Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " "defined above by means of a ``client_fn()`` callback. Note that the " @@ -21688,7 +21890,7 @@ msgid "" "additioinal hyperparameters in ``pyproject.toml`` and access them here." 
msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:323 +#: ../../source/tutorial-quickstart-pytorch.rst:309 msgid "" "To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" " identical signature to that of ``client_fn()`` but the return type is " @@ -21702,30 +21904,30 @@ msgid "" "``pyproject.toml``." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:365 +#: ../../source/tutorial-quickstart-pytorch.rst:348 msgid "" "Check the `source code `_ of the extended version of this tutorial in " "``examples/quickstart-pytorch`` in the Flower GitHub repository." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:372 -#: ../../source/tutorial-quickstart-tensorflow.rst:295 +#: ../../source/tutorial-quickstart-pytorch.rst:354 +#: ../../source/tutorial-quickstart-tensorflow.rst:278 msgid "Video tutorial" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:376 +#: ../../source/tutorial-quickstart-pytorch.rst:358 msgid "" "The video shown below shows how to setup a PyTorch + Flower project using" " our previously recommended APIs. A new video tutorial will be released " "that shows the new APIs (as the content above does)" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:4 msgid "Quickstart PyTorch Lightning" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:7 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:6 msgid "" "In this federated learning tutorial we will learn how to train an " "AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " @@ -21733,13 +21935,13 @@ msgid "" ":doc:`virtualenv `." 
msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:20 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:19 msgid "" "This will create a new directory called `quickstart-pytorch-lightning` " "containing the following files:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:43 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:42 msgid "" "By default, Flower Simulation Engine will be started and it will create a" " federation of 4 nodes using `FedAvg `_ of this tutorial in ``examples" @@ -21769,14 +21971,14 @@ msgid "" "with scikit-learn to train a linear regression model." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:5 +#: ../../source/tutorial-quickstart-scikitlearn.rst:4 msgid "Quickstart scikit-learn" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:10 +#: ../../source/tutorial-quickstart-scikitlearn.rst:9 msgid "" -"In this tutorial, we will learn how to train a :code:`Logistic " -"Regression` model on MNIST using Flower and scikit-learn." +"In this tutorial, we will learn how to train a ``Logistic Regression`` " +"model on MNIST using Flower and scikit-learn." msgstr "" #: ../../source/tutorial-quickstart-scikitlearn.rst:12 @@ -21785,13 +21987,13 @@ msgid "" "within this :doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +#: ../../source/tutorial-quickstart-scikitlearn.rst:15 msgid "" "Our example consists of one *server* and two *clients* all having the " "same model." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:16 +#: ../../source/tutorial-quickstart-scikitlearn.rst:17 msgid "" "*Clients* are responsible for generating individual model parameter " "updates for the model based on their local datasets. These updates are " @@ -21801,188 +22003,187 @@ msgid "" "called a *round*." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +#: ../../source/tutorial-quickstart-scikitlearn.rst:23 msgid "" "Now that we have a rough idea of what is going on, let's get started. We " "first need to install Flower. You can do this by running:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:26 +#: ../../source/tutorial-quickstart-scikitlearn.rst:30 msgid "Since we want to use scikit-learn, let's go ahead and install it:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:32 +#: ../../source/tutorial-quickstart-scikitlearn.rst:36 msgid "Or simply install all dependencies using Poetry:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:42 +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 msgid "" "Now that we have all our dependencies installed, let's run a simple " "distributed training with two clients and one server. However, before " "setting up the client and server, we will define all functionalities that" -" we need for our federated learning setup within :code:`utils.py`. The " -":code:`utils.py` contains different functions defining all the machine " +" we need for our federated learning setup within ``utils.py``. 
The " +"``utils.py`` contains different functions defining all the machine " "learning basics:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:45 -msgid ":code:`get_model_parameters()`" +#: ../../source/tutorial-quickstart-scikitlearn.rst:51 +msgid "``get_model_parameters()``" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:46 -msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +msgid "Returns the parameters of a ``sklearn`` LogisticRegression model" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:47 -msgid ":code:`set_model_params()`" +#: ../../source/tutorial-quickstart-scikitlearn.rst:53 +msgid "``set_model_params()``" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:48 -msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" +#: ../../source/tutorial-quickstart-scikitlearn.rst:54 +msgid "Sets the parameters of a ``sklearn`` LogisticRegression model" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid ":code:`set_initial_params()`" +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +msgid "``set_initial_params()``" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 msgid "Initializes the model parameters that the Flower server will ask for" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +#: ../../source/tutorial-quickstart-scikitlearn.rst:58 msgid "" -"Please check out :code:`utils.py` `here " +"Please check out ``utils.py`` `here " "`_ for more details. The pre-defined functions are used in" -" the :code:`client.py` and imported. The :code:`client.py` also requires " -"to import several packages such as Flower and scikit-learn:" +" the ``client.py`` and imported. 
The ``client.py`` also requires to " +"import several packages such as Flower and scikit-learn:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:67 +#: ../../source/tutorial-quickstart-scikitlearn.rst:75 msgid "" "Prior to local training, we need to load the MNIST dataset, a popular " "image classification dataset of handwritten digits for machine learning, " "and partition the dataset for FL. This can be conveniently achieved using" " `Flower Datasets `_. The " -":code:`FederatedDataset.load_partition()` method loads the partitioned " -"training set for each partition ID defined in the :code:`--partition-id` " +"``FederatedDataset.load_partition()`` method loads the partitioned " +"training set for each partition ID defined in the ``--partition-id`` " "argument." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:95 +#: ../../source/tutorial-quickstart-scikitlearn.rst:106 msgid "" "Next, the logistic regression model is defined and initialized with " -":code:`utils.set_initial_params()`." +"``utils.set_initial_params()``." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:107 +#: ../../source/tutorial-quickstart-scikitlearn.rst:119 msgid "" "The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to fit the logistic regression we defined earlier)." +"``Client``. When the server selects a particular client for training, it " +"sends training instructions over the network. The client receives those " +"instructions and calls one of the ``Client`` methods to run your code " +"(i.e., to fit the logistic regression we defined earlier)." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:113 +#: ../../source/tutorial-quickstart-scikitlearn.rst:124 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses scikit-learn. Implementing :code:`NumPyClient` usually " -"means defining the following methods (:code:`set_parameters` is optional " -"though):" +"Flower provides a convenience class called ``NumPyClient`` which makes it" +" easier to implement the ``Client`` interface when your workload uses " +"scikit-learn. Implementing ``NumPyClient`` usually means defining the " +"following methods (``set_parameters`` is optional though):" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:119 +#: ../../source/tutorial-quickstart-scikitlearn.rst:130 msgid "return the model weight as a list of NumPy ndarrays" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 -msgid ":code:`set_parameters` (optional)" +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 +msgid "``set_parameters`` (optional)" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 msgid "" "update the local model weights with the parameters received from the " "server" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:122 -msgid "is directly imported with :code:`utils.set_model_params()`" +#: ../../source/tutorial-quickstart-scikitlearn.rst:133 +msgid "is directly imported with ``utils.set_model_params()``" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:124 +#: ../../source/tutorial-quickstart-scikitlearn.rst:135 msgid "set the local model weights" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 msgid "train the local model" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:126 +#: 
../../source/tutorial-quickstart-scikitlearn.rst:137 msgid "return the updated local model weights" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 +#: ../../source/tutorial-quickstart-scikitlearn.rst:139 msgid "test the local model" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:130 +#: ../../source/tutorial-quickstart-scikitlearn.rst:141 msgid "The methods can be implemented in the following way:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:153 +#: ../../source/tutorial-quickstart-scikitlearn.rst:163 msgid "" -"We can now create an instance of our class :code:`MnistClient` and add " -"one line to actually run this client:" +"We can now create an instance of our class ``MnistClient`` and add one " +"line to actually run this client:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:160 +#: ../../source/tutorial-quickstart-scikitlearn.rst:170 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" -" the client which server to connect to. In our case we can run the server" -" and the client on the same machine, therefore we use " -":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we pass to the client." +"That's it for the client. We only have to implement ``Client`` or " +"``NumPyClient`` and call ``fl.client.start_client()``. If you implement a" +" client of type ``NumPyClient`` you'll need to first call its " +"``to_client()`` method. The string ``\"0.0.0.0:8080\"`` tells the client " +"which server to connect to. 
In our case we can run the server and the " +"client on the same machine, therefore we use ``\"0.0.0.0:8080\"``. If we " +"run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" pass to the client." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:169 +#: ../../source/tutorial-quickstart-scikitlearn.rst:181 msgid "" "The following Flower server is a little bit more advanced and returns an " "evaluation function for the server-side evaluation. First, we import " "again all required libraries such as Flower and scikit-learn." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:172 -msgid ":code:`server.py`, import Flower and start the server:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +msgid "``server.py``, import Flower and start the server:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +#: ../../source/tutorial-quickstart-scikitlearn.rst:198 msgid "" -"The number of federated learning rounds is set in :code:`fit_round()` and" -" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " -"function is called after each federated learning round and gives you " -"information about loss and accuracy. Note that we also make use of Flower" -" Datasets here to load the test split of the MNIST dataset for server-" -"side evaluation." +"The number of federated learning rounds is set in ``fit_round()`` and the" +" evaluation is defined in ``get_evaluate_fn()``. The evaluation function " +"is called after each federated learning round and gives you information " +"about loss and accuracy. Note that we also make use of Flower Datasets " +"here to load the test split of the MNIST dataset for server-side " +"evaluation." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:213 +#: ../../source/tutorial-quickstart-scikitlearn.rst:228 msgid "" -"The :code:`main` contains the server-side parameter initialization " -":code:`utils.set_initial_params()` as well as the aggregation strategy " -":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " +"The ``main`` contains the server-side parameter initialization " +"``utils.set_initial_params()`` as well as the aggregation strategy " +"``fl.server.strategy:FedAvg()``. The strategy is the default one, " "federated averaging (or FedAvg), with two clients and evaluation after " "each federated learning round. The server can be started with the command" -" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." +" ``fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))``." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:232 +#: ../../source/tutorial-quickstart-scikitlearn.rst:256 msgid "" "With both client and server ready, we can now run everything and see " "federated learning in action. Federated learning systems usually have a " @@ -21990,33 +22191,33 @@ msgid "" "first:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:239 -#: ../../source/tutorial-quickstart-xgboost.rst:575 +#: ../../source/tutorial-quickstart-scikitlearn.rst:264 +#: ../../source/tutorial-quickstart-xgboost.rst:598 msgid "" "Once the server is running we can start the clients in different " "terminals. 
Open a new terminal and start the first client:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:246 -#: ../../source/tutorial-quickstart-xgboost.rst:582 +#: ../../source/tutorial-quickstart-scikitlearn.rst:271 +#: ../../source/tutorial-quickstart-xgboost.rst:605 msgid "Open another terminal and start the second client:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:252 -#: ../../source/tutorial-quickstart-xgboost.rst:588 +#: ../../source/tutorial-quickstart-scikitlearn.rst:277 +#: ../../source/tutorial-quickstart-xgboost.rst:611 msgid "" "Each client will have its own dataset. You should now see how the " "training does in the very first terminal (the one that started the " "server):" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:286 +#: ../../source/tutorial-quickstart-scikitlearn.rst:311 msgid "" "Congratulations! You've successfully built and run your first federated " "learning system. The full `source code " "`_ for this example can be found in :code:`examples/sklearn-logreg-" -"mnist`." +"mnist>`_ for this example can be found in ``examples/sklearn-logreg-" +"mnist``." msgstr "" #: ../../source/tutorial-quickstart-tensorflow.rst:-1 @@ -22025,11 +22226,11 @@ msgid "" "with TensorFlow to train a CNN model on CIFAR-10." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:5 +#: ../../source/tutorial-quickstart-tensorflow.rst:4 msgid "Quickstart TensorFlow" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:7 +#: ../../source/tutorial-quickstart-tensorflow.rst:6 msgid "" "In this tutorial we will learn how to train a Convolutional Neural " "Network on CIFAR-10 using the Flower framework and TensorFlow. First of " @@ -22037,7 +22238,7 @@ msgid "" " within a :doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:13 +#: ../../source/tutorial-quickstart-tensorflow.rst:11 msgid "" "Let's use `flwr new` to create a complete Flower+TensorFlow project. 
It " "will generate all the files needed to run, by default with the Flower " @@ -22049,14 +22250,14 @@ msgid "" "api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner>`_." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:28 +#: ../../source/tutorial-quickstart-tensorflow.rst:26 msgid "" "Then, run the command below. You will be prompted to select one of the " "available templates (choose ``TensorFlow``), give a name to your project," " and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:118 +#: ../../source/tutorial-quickstart-tensorflow.rst:114 msgid "" "This tutorial uses `Flower Datasets `_ " "to easily download and partition the `CIFAR-10` dataset. In this example " @@ -22070,14 +22271,14 @@ msgid "" " correspond to their data partition." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:147 +#: ../../source/tutorial-quickstart-tensorflow.rst:141 msgid "" "Next, we need a model. We defined a simple Convolutional Neural Network " "(CNN), but feel free to replace it with a more sophisticated model if " "you'd like:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:178 +#: ../../source/tutorial-quickstart-tensorflow.rst:170 msgid "" "With `TensorFlow`, we can use the built-in ``get_weights()`` and " "``set_weights()`` functions, which simplifies the implementation with " @@ -22088,7 +22289,7 @@ msgid "" "set that the client might have:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:212 +#: ../../source/tutorial-quickstart-tensorflow.rst:203 msgid "" "Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " "defined above by means of a ``client_fn()`` callback. Note that the " @@ -22100,7 +22301,7 @@ msgid "" "``pyproject.toml`` and access them here." 
msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:247 +#: ../../source/tutorial-quickstart-tensorflow.rst:234 msgid "" "To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" " identical signature to that of ``client_fn()`` but the return type is " @@ -22112,13 +22313,13 @@ msgid "" "the global model to federate." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:284 +#: ../../source/tutorial-quickstart-tensorflow.rst:270 msgid "" "Check the source code of the extended version of this tutorial in " "|quickstart_tf_link|_ in the Flower GitHub repository." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:299 +#: ../../source/tutorial-quickstart-tensorflow.rst:282 msgid "" "The video shown below shows how to setup a TensorFlow + Flower project " "using our previously recommended APIs. A new video tutorial will be " @@ -22131,15 +22332,15 @@ msgid "" "with XGBoost to train classification models on trees." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:5 +#: ../../source/tutorial-quickstart-xgboost.rst:4 msgid "Quickstart XGBoost" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:14 +#: ../../source/tutorial-quickstart-xgboost.rst:13 msgid "Federated XGBoost" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:16 +#: ../../source/tutorial-quickstart-xgboost.rst:15 msgid "" "EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " "implementation of gradient-boosted decision tree (**GBDT**), that " @@ -22149,18 +22350,18 @@ msgid "" "concurrently, unlike the sequential approach taken by GBDT." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:20 +#: ../../source/tutorial-quickstart-xgboost.rst:21 msgid "" "Often, for tabular data on medium-sized datasets with fewer than 10k " "training examples, XGBoost surpasses the results of deep learning " "techniques." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:23 +#: ../../source/tutorial-quickstart-xgboost.rst:25 msgid "Why federated XGBoost?" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:25 +#: ../../source/tutorial-quickstart-xgboost.rst:27 msgid "" "Indeed, as the demand for data privacy and decentralized learning grows, " "there's an increasing requirement to implement federated XGBoost systems " @@ -22168,7 +22369,7 @@ msgid "" "detection." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:27 +#: ../../source/tutorial-quickstart-xgboost.rst:31 msgid "" "Federated learning ensures that raw data remains on the local device, " "making it an attractive approach for sensitive domains where data " @@ -22177,10 +22378,10 @@ msgid "" "solution for these specific challenges." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:30 +#: ../../source/tutorial-quickstart-xgboost.rst:36 msgid "" "In this tutorial we will learn how to train a federated XGBoost model on " -"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " +"HIGGS dataset using Flower and ``xgboost`` package. We use a simple " "example (`full code xgboost-quickstart " "`_)" " with two *clients* and one *server* to demonstrate how federated XGBoost" @@ -22189,30 +22390,30 @@ msgid "" "comprehensive>`_) to run various experiments." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:37 +#: ../../source/tutorial-quickstart-xgboost.rst:46 msgid "Environment Setup" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:39 +#: ../../source/tutorial-quickstart-xgboost.rst:48 msgid "" "First of all, it is recommended to create a virtual environment and run " "everything within a :doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:41 +#: ../../source/tutorial-quickstart-xgboost.rst:51 msgid "" "We first need to install Flower and Flower Datasets. 
You can do this by " "running :" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:47 +#: ../../source/tutorial-quickstart-xgboost.rst:57 msgid "" -"Since we want to use :code:`xgboost` package to build up XGBoost trees, " -"let's go ahead and install :code:`xgboost`:" +"Since we want to use ``xgboost`` package to build up XGBoost trees, let's" +" go ahead and install ``xgboost``:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:57 +#: ../../source/tutorial-quickstart-xgboost.rst:67 msgid "" "*Clients* are responsible for generating individual weight-updates for " "the model based on their local datasets. Now that we have all our " @@ -22220,219 +22421,215 @@ msgid "" "clients and one server." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:60 +#: ../../source/tutorial-quickstart-xgboost.rst:71 msgid "" -"In a file called :code:`client.py`, import xgboost, Flower, Flower " -"Datasets and other related functions:" +"In a file called ``client.py``, import xgboost, Flower, Flower Datasets " +"and other related functions:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:87 +#: ../../source/tutorial-quickstart-xgboost.rst:99 msgid "Dataset partition and hyper-parameter selection" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:89 +#: ../../source/tutorial-quickstart-xgboost.rst:101 msgid "" "Prior to local training, we require loading the HIGGS dataset from Flower" " Datasets and conduct data partitioning for FL:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:102 +#: ../../source/tutorial-quickstart-xgboost.rst:115 msgid "" "In this example, we split the dataset into 30 partitions with uniform " -"distribution (:code:`IidPartitioner(num_partitions=30)`). Then, we load " -"the partition for the given client based on :code:`partition_id`:" +"distribution (``IidPartitioner(num_partitions=30)``). 
Then, we load the " +"partition for the given client based on ``partition_id``:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:121 +#: ../../source/tutorial-quickstart-xgboost.rst:135 msgid "" "After that, we do train/test splitting on the given partition (client's " -"local data), and transform data format for :code:`xgboost` package." +"local data), and transform data format for ``xgboost`` package." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:134 +#: ../../source/tutorial-quickstart-xgboost.rst:149 msgid "" -"The functions of :code:`train_test_split` and " -":code:`transform_dataset_to_dmatrix` are defined as below:" +"The functions of ``train_test_split`` and " +"``transform_dataset_to_dmatrix`` are defined as below:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:158 +#: ../../source/tutorial-quickstart-xgboost.rst:174 msgid "Finally, we define the hyper-parameters used for XGBoost training." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:174 +#: ../../source/tutorial-quickstart-xgboost.rst:190 msgid "" -"The :code:`num_local_round` represents the number of iterations for local" -" tree boost. We use CPU for the training in default. One can shift it to " -"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " -"evaluation metric." +"The ``num_local_round`` represents the number of iterations for local " +"tree boost. We use CPU for the training in default. One can shift it to " +"GPU by setting ``tree_method`` to ``gpu_hist``. We use AUC as evaluation " +"metric." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:181 +#: ../../source/tutorial-quickstart-xgboost.rst:195 msgid "Flower client definition for XGBoost" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:183 +#: ../../source/tutorial-quickstart-xgboost.rst:197 msgid "" "After loading the dataset we define the Flower client. 
We follow the " -"general rule to define :code:`XgbClient` class inherited from " -":code:`fl.client.Client`." +"general rule to define ``XgbClient`` class inherited from " +"``fl.client.Client``." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:205 +#: ../../source/tutorial-quickstart-xgboost.rst:219 msgid "" -"All required parameters defined above are passed to :code:`XgbClient`'s " +"All required parameters defined above are passed to ``XgbClient``'s " "constructor." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:207 +#: ../../source/tutorial-quickstart-xgboost.rst:221 msgid "" -"Then, we override :code:`get_parameters`, :code:`fit` and " -":code:`evaluate` methods insides :code:`XgbClient` class as follows." +"Then, we override ``get_parameters``, ``fit`` and ``evaluate`` methods " +"insides ``XgbClient`` class as follows." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:221 +#: ../../source/tutorial-quickstart-xgboost.rst:236 msgid "" "Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. In this case, we do not use " -":code:`get_parameters` and :code:`set_parameters` to initialise model " -"parameters for XGBoost. As a result, let's return an empty tensor in " -":code:`get_parameters` when it is called by the server at the first " -"round." +"specified random weights. In this case, we do not use ``get_parameters`` " +"and ``set_parameters`` to initialise model parameters for XGBoost. As a " +"result, let's return an empty tensor in ``get_parameters`` when it is " +"called by the server at the first round." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:262 +#: ../../source/tutorial-quickstart-xgboost.rst:278 msgid "" -"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " -"up the first set of trees. 
From the second round, we load the global " -"model sent from server to new build Booster object, and then update model" -" weights on local training data with function :code:`local_boost` as " -"follows:" +"In ``fit``, at the first round, we call ``xgb.train()`` to build up the " +"first set of trees. From the second round, we load the global model sent " +"from server to new build Booster object, and then update model weights on" +" local training data with function ``local_boost`` as follows:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:281 +#: ../../source/tutorial-quickstart-xgboost.rst:298 msgid "" -"Given :code:`num_local_round`, we update trees by calling " -":code:`bst_input.update` method. After training, the last " -":code:`N=num_local_round` trees will be extracted to send to the server." +"Given ``num_local_round``, we update trees by calling " +"``bst_input.update`` method. After training, the last " +"``N=num_local_round`` trees will be extracted to send to the server." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:313 +#: ../../source/tutorial-quickstart-xgboost.rst:330 msgid "" -"In :code:`evaluate`, after loading the global model, we call " -":code:`bst.eval_set` function to conduct evaluation on valid set. The AUC" -" value will be returned." +"In ``evaluate``, after loading the global model, we call ``bst.eval_set``" +" function to conduct evaluation on valid set. The AUC value will be " +"returned." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:316 +#: ../../source/tutorial-quickstart-xgboost.rst:333 msgid "" -"Now, we can create an instance of our class :code:`XgbClient` and add one" -" line to actually run this client:" +"Now, we can create an instance of our class ``XgbClient`` and add one " +"line to actually run this client:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:332 +#: ../../source/tutorial-quickstart-xgboost.rst:350 msgid "" -"That's it for the client. 
We only have to implement :code:`Client` and " -"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " -"tells the client which server to connect to. In our case we can run the " -"server and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"That's it for the client. We only have to implement ``Client`` and call " +"``fl.client.start_client()``. The string ``\"[::]:8080\"`` tells the " +"client which server to connect to. In our case we can run the server and " +"the client on the same machine, therefore we use ``\"[::]:8080\"``. If we" +" run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" point the client at." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:343 +#: ../../source/tutorial-quickstart-xgboost.rst:360 msgid "" "These updates are then sent to the *server* which will aggregate them to " "produce a better model. Finally, the *server* sends this improved version" " of the model back to each *client* to finish a complete FL round." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:346 +#: ../../source/tutorial-quickstart-xgboost.rst:364 msgid "" -"In a file named :code:`server.py`, import Flower and FedXgbBagging from " -":code:`flwr.server.strategy`." +"In a file named ``server.py``, import Flower and FedXgbBagging from " +"``flwr.server.strategy``." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:348 +#: ../../source/tutorial-quickstart-xgboost.rst:367 msgid "We first define a strategy for XGBoost bagging aggregation." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:380 +#: ../../source/tutorial-quickstart-xgboost.rst:401 msgid "" -"We use two clients for this example. 
An " -":code:`evaluate_metrics_aggregation` function is defined to collect and " -"wighted average the AUC values from clients. The :code:`config_func` " -"function is to return the current FL round number to client's " -":code:`fit()` and :code:`evaluate()` methods." +"We use two clients for this example. An ``evaluate_metrics_aggregation`` " +"function is defined to collect and wighted average the AUC values from " +"clients. The ``config_func`` function is to return the current FL round " +"number to client's ``fit()`` and ``evaluate()`` methods." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:384 +#: ../../source/tutorial-quickstart-xgboost.rst:406 msgid "Then, we start the server:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:396 +#: ../../source/tutorial-quickstart-xgboost.rst:418 msgid "Tree-based bagging aggregation" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:398 +#: ../../source/tutorial-quickstart-xgboost.rst:420 msgid "" "You must be curious about how bagging aggregation works. Let's look into " "the details." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:400 +#: ../../source/tutorial-quickstart-xgboost.rst:422 msgid "" -"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " -":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." -" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " -"and :code:`evaluate` methods as follows:" +"In file ``flwr.server.strategy.fedxgb_bagging.py``, we define " +"``FedXgbBagging`` inherited from ``flwr.server.strategy.FedAvg``. 
Then, " +"we override the ``aggregate_fit``, ``aggregate_evaluate`` and " +"``evaluate`` methods as follows:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:496 +#: ../../source/tutorial-quickstart-xgboost.rst:519 msgid "" -"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " -"trees by calling :code:`aggregate()` function:" +"In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost " +"trees by calling ``aggregate()`` function:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:555 +#: ../../source/tutorial-quickstart-xgboost.rst:579 msgid "" "In this function, we first fetch the number of trees and the number of " "parallel trees for the current and previous model by calling " -":code:`_get_tree_nums`. Then, the fetched information will be aggregated." -" After that, the trees (containing model weights) are aggregated to " +"``_get_tree_nums``. Then, the fetched information will be aggregated. " +"After that, the trees (containing model weights) are aggregated to " "generate a new tree model." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:560 +#: ../../source/tutorial-quickstart-xgboost.rst:584 msgid "" "After traversal of all clients' models, a new global model is generated, " "followed by the serialisation, and sending back to each client." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:565 +#: ../../source/tutorial-quickstart-xgboost.rst:588 msgid "Launch Federated XGBoost!" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:641 +#: ../../source/tutorial-quickstart-xgboost.rst:664 msgid "" "Congratulations! You've successfully built and run your first federated " -"XGBoost system. The AUC values can be checked in " -":code:`metrics_distributed`. One can see that the average AUC increases " -"over FL rounds." +"XGBoost system. The AUC values can be checked in ``metrics_distributed``." +" One can see that the average AUC increases over FL rounds." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:646 +#: ../../source/tutorial-quickstart-xgboost.rst:668 msgid "" "The full `source code `_ for this example can be found in :code:`examples" -"/xgboost-quickstart`." +"/xgboost-quickstart/>`_ for this example can be found in ``examples" +"/xgboost-quickstart``." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:650 +#: ../../source/tutorial-quickstart-xgboost.rst:673 msgid "Comprehensive Federated XGBoost" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:652 +#: ../../source/tutorial-quickstart-xgboost.rst:675 msgid "" "Now that you have known how federated XGBoost work with Flower, it's time" " to run some more comprehensive experiments by customising the " @@ -22445,11 +22642,11 @@ msgid "" "client cohorts in a resource-aware manner. Let's take a look!" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:659 +#: ../../source/tutorial-quickstart-xgboost.rst:685 msgid "Cyclic training" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:661 +#: ../../source/tutorial-quickstart-xgboost.rst:687 msgid "" "In addition to bagging aggregation, we offer a cyclic training scheme, " "which performs FL in a client-by-client fashion. Instead of aggregating " @@ -22459,178 +22656,176 @@ msgid "" "for next round's boosting." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:665 -msgid "" -"To do this, we first customise a :code:`ClientManager` in " -":code:`server_utils.py`:" +#: ../../source/tutorial-quickstart-xgboost.rst:693 +msgid "To do this, we first customise a ``ClientManager`` in ``server_utils.py``:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:705 +#: ../../source/tutorial-quickstart-xgboost.rst:733 msgid "" -"The customised :code:`ClientManager` samples all available clients in " -"each FL round based on the order of connection to the server. 
Then, we " -"define a new strategy :code:`FedXgbCyclic` in " -":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " +"The customised ``ClientManager`` samples all available clients in each FL" +" round based on the order of connection to the server. Then, we define a " +"new strategy ``FedXgbCyclic`` in " +"``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially " "select only one client in given round and pass the received model to next" " client." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:746 +#: ../../source/tutorial-quickstart-xgboost.rst:775 msgid "" -"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " +"Unlike the original ``FedAvg``, we don't perform aggregation here. " "Instead, we just make a copy of the received client model as global model" -" by overriding :code:`aggregate_fit`." +" by overriding ``aggregate_fit``." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:749 +#: ../../source/tutorial-quickstart-xgboost.rst:778 msgid "" -"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" -" methods ensure the clients to be sequentially selected given FL round:" +"Also, the customised ``configure_fit`` and ``configure_evaluate`` methods" +" ensure the clients to be sequentially selected given FL round:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:813 +#: ../../source/tutorial-quickstart-xgboost.rst:840 msgid "Customised data partitioning" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:815 +#: ../../source/tutorial-quickstart-xgboost.rst:842 msgid "" -"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" -" to instantiate the data partitioner based on the given " -":code:`num_partitions` and :code:`partitioner_type`. Currently, we " -"provide four supported partitioner type to simulate the uniformity/non-" -"uniformity in data quantity (uniform, linear, square, exponential)." 
+"In ``dataset.py``, we have a function ``instantiate_partitioner`` to " +"instantiate the data partitioner based on the given ``num_partitions`` " +"and ``partitioner_type``. Currently, we provide four supported " +"partitioner type to simulate the uniformity/non-uniformity in data " +"quantity (uniform, linear, square, exponential)." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:846 +#: ../../source/tutorial-quickstart-xgboost.rst:873 msgid "Customised centralised/distributed evaluation" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:848 +#: ../../source/tutorial-quickstart-xgboost.rst:875 msgid "" "To facilitate centralised evaluation, we define a function in " -":code:`server_utils.py`:" +"``server_utils.py``:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:880 +#: ../../source/tutorial-quickstart-xgboost.rst:907 msgid "" "This function returns a evaluation function which instantiates a " -":code:`Booster` object and loads the global model weights to it. The " -"evaluation is conducted by calling :code:`eval_set()` method, and the " -"tested AUC value is reported." +"``Booster`` object and loads the global model weights to it. The " +"evaluation is conducted by calling ``eval_set()`` method, and the tested " +"AUC value is reported." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:883 +#: ../../source/tutorial-quickstart-xgboost.rst:911 msgid "" "As for distributed evaluation on the clients, it's same as the quick-" -"start example by overriding the :code:`evaluate()` method insides the " -":code:`XgbClient` class in :code:`client_utils.py`." +"start example by overriding the ``evaluate()`` method insides the " +"``XgbClient`` class in ``client_utils.py``." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:887 +#: ../../source/tutorial-quickstart-xgboost.rst:916 msgid "Flower simulation" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:888 +#: ../../source/tutorial-quickstart-xgboost.rst:918 msgid "" -"We also provide an example code (:code:`sim.py`) to use the simulation " +"We also provide an example code (``sim.py``) to use the simulation " "capabilities of Flower to simulate federated XGBoost training on either a" " single machine or a cluster of machines." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:922 +#: ../../source/tutorial-quickstart-xgboost.rst:954 msgid "" -"After importing all required packages, we define a :code:`main()` " -"function to perform the simulation process:" +"After importing all required packages, we define a ``main()`` function to" +" perform the simulation process:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:977 +#: ../../source/tutorial-quickstart-xgboost.rst:1010 msgid "" "We first load the dataset and perform data partitioning, and the pre-" -"processed data is stored in a :code:`list`. After the simulation begins, " -"the clients won't need to pre-process their partitions again." +"processed data is stored in a ``list``. After the simulation begins, the " +"clients won't need to pre-process their partitions again." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:980 +#: ../../source/tutorial-quickstart-xgboost.rst:1014 msgid "Then, we define the strategies and other hyper-parameters:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1031 +#: ../../source/tutorial-quickstart-xgboost.rst:1065 msgid "" "After that, we start the simulation by calling " -":code:`fl.simulation.start_simulation`:" +"``fl.simulation.start_simulation``:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1051 +#: ../../source/tutorial-quickstart-xgboost.rst:1085 msgid "" -"One of key parameters for :code:`start_simulation` is :code:`client_fn` " -"which returns a function to construct a client. We define it as follows:" +"One of key parameters for ``start_simulation`` is ``client_fn`` which " +"returns a function to construct a client. We define it as follows:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1094 +#: ../../source/tutorial-quickstart-xgboost.rst:1126 msgid "Arguments parser" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1096 +#: ../../source/tutorial-quickstart-xgboost.rst:1128 msgid "" -"In :code:`utils.py`, we define the arguments parsers for clients, server " -"and simulation, allowing users to specify different experimental " -"settings. Let's first see the sever side:" +"In ``utils.py``, we define the arguments parsers for clients, server and " +"simulation, allowing users to specify different experimental settings. " +"Let's first see the sever side:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1142 +#: ../../source/tutorial-quickstart-xgboost.rst:1175 msgid "" "This allows user to specify training strategies / the number of total " "clients / FL rounds / participating clients / clients for evaluation, and" -" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" -" will do centralised evaluation and all functionalities for client " +" evaluation fashion. 
Note that with ``--centralised-eval``, the sever " +"will do centralised evaluation and all functionalities for client " "evaluation will be disabled." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1146 +#: ../../source/tutorial-quickstart-xgboost.rst:1180 msgid "Then, the argument parser on client side:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1200 +#: ../../source/tutorial-quickstart-xgboost.rst:1234 msgid "" "This defines various options for client data partitioning. Besides, " "clients also have an option to conduct evaluation on centralised test set" -" by setting :code:`--centralised-eval`, as well as an option to perform " -"scaled learning rate based on the number of clients by setting :code" -":`--scaled-lr`." +" by setting ``--centralised-eval``, as well as an option to perform " +"scaled learning rate based on the number of clients by setting " +"``--scaled-lr``." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1204 +#: ../../source/tutorial-quickstart-xgboost.rst:1239 msgid "We also have an argument parser for simulation:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1282 +#: ../../source/tutorial-quickstart-xgboost.rst:1317 msgid "This integrates all arguments for both client and server sides." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1285 +#: ../../source/tutorial-quickstart-xgboost.rst:1320 msgid "Example commands" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1287 +#: ../../source/tutorial-quickstart-xgboost.rst:1322 msgid "" "To run a centralised evaluated experiment with bagging strategy on 5 " "clients with exponential distribution for 50 rounds, we first start the " "server as below:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1294 +#: ../../source/tutorial-quickstart-xgboost.rst:1329 msgid "Then, on each client terminal, we start the clients:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1300 +#: ../../source/tutorial-quickstart-xgboost.rst:1335 msgid "To run the same experiment with Flower simulation:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1306 +#: ../../source/tutorial-quickstart-xgboost.rst:1341 msgid "" "The full `code `_ for this comprehensive example can be found in" -" :code:`examples/xgboost-comprehensive`." +" ``examples/xgboost-comprehensive``." 
msgstr "" #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 @@ -23969,7 +24164,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" +msgid "|3a7aceef05f0421794726ac54aaf12fd|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 @@ -23984,7 +24179,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|33cacb7d985c4906b348515c1a5cd993|" +msgid "|d741075f8e624331b42c0746f7d258a0|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 @@ -24005,7 +24200,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|cc080a555947492fa66131dc3a967603|" +msgid "|8fc92d668bcb42b8bda55143847f2329|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 @@ -24021,7 +24216,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|085c3e0fb8664c6aa06246636524b20b|" +msgid "|1c705d833a024f22adcaeb8ae3d13b0b|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 @@ -24037,7 +24232,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|bfe69c74e48c45d49b50251c38c2a019|" +msgid "|77a037b546a84262b608e04bc82a2c96|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 @@ -24052,7 +24247,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|ebbecd651f0348d99c6511ea859bf4ca|" +msgid "|f568e24c9fb0435690ac628210a4be96|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 @@ -24072,7 +24267,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|163117eb654a4273babba413cf8065f5|" +msgid "|a7bf029981514e2593aa3a2b48c9d76a|" msgstr "" #: 
../../source/tutorial-series-what-is-federated-learning.ipynb:173 @@ -24087,7 +24282,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|452ac3ba453b4cd1be27be1ba7560d64|" +msgid "|3f645ad807f84be8b1f8f3267173939c|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 @@ -24227,7 +24422,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|f403fcd69e4e44409627e748b404c086|" +msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 @@ -24251,7 +24446,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|4b00fe63870145968f8443619a792a42|" +msgid "|edcf9a04d96e42608fd01a333375febe|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 @@ -24275,7 +24470,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|368378731066486fa4397e89bc6b870c|" +msgid "|3dae22fe797043968e2b7aa7073c78bd|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 @@ -24298,7 +24493,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|a66aa83d85bf4ffba7ed660b718066da|" +msgid "|ba178f75267d4ad8aa7363f20709195f|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 @@ -24336,7 +24531,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|82324b9af72a4582a81839d55caab767|" +msgid "|c380c750bfd2444abce039a1c6fa8e60|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 @@ -24430,7 +24625,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" +msgid "|e7cec00a114b48359935c6510595132e|" msgstr "" #: 
../../source/tutorial-series-what-is-federated-learning.ipynb:340 @@ -30186,3 +30381,2328 @@ msgstr "" #~ msgid "|ed9aae51da70428eab7eef32f21e819e|" #~ msgstr "" +#~ msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" +#~ msgstr "" + +#~ msgid "|33cacb7d985c4906b348515c1a5cd993|" +#~ msgstr "" + +#~ msgid "|cc080a555947492fa66131dc3a967603|" +#~ msgstr "" + +#~ msgid "|085c3e0fb8664c6aa06246636524b20b|" +#~ msgstr "" + +#~ msgid "|bfe69c74e48c45d49b50251c38c2a019|" +#~ msgstr "" + +#~ msgid "|ebbecd651f0348d99c6511ea859bf4ca|" +#~ msgstr "" + +#~ msgid "|163117eb654a4273babba413cf8065f5|" +#~ msgstr "" + +#~ msgid "|452ac3ba453b4cd1be27be1ba7560d64|" +#~ msgstr "" + +#~ msgid "|f403fcd69e4e44409627e748b404c086|" +#~ msgstr "" + +#~ msgid "|4b00fe63870145968f8443619a792a42|" +#~ msgstr "" + +#~ msgid "|368378731066486fa4397e89bc6b870c|" +#~ msgstr "" + +#~ msgid "|a66aa83d85bf4ffba7ed660b718066da|" +#~ msgstr "" + +#~ msgid "|82324b9af72a4582a81839d55caab767|" +#~ msgstr "" + +#~ msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" +#~ msgstr "" + +#~ msgid "" +#~ "The Visual Studio Code Remote - " +#~ "Containers extension lets you use a " +#~ "Docker container as a fully-featured " +#~ "development environment. It allows you " +#~ "to open any folder inside (or " +#~ "mounted into) a container and take " +#~ "advantage of Visual Studio Code's full" +#~ " feature set. A :code:`devcontainer.json` " +#~ "file in your project tells VS Code" +#~ " how to access (or create) a " +#~ "development container with a well-" +#~ "defined tool and runtime stack. This " +#~ "container can be used to run an" +#~ " application or to separate tools, " +#~ "libraries, or runtimes needed for " +#~ "working with a codebase." +#~ msgstr "" + +#~ msgid "" +#~ "Configuring and setting up the " +#~ ":code:`Dockerfile` as well the configuration" +#~ " for the devcontainer can be a " +#~ "bit more involved. The good thing " +#~ "is you don't have to do it. 
" +#~ "Usually it should be enough to " +#~ "install `Docker " +#~ "`_ on your " +#~ "system and ensure its available on " +#~ "your command line. Additionally, install " +#~ "the `VSCode Containers Extension " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "If you prefer to use Anaconda for" +#~ " your virtual environment then install " +#~ "and setup the `conda " +#~ "`_ package. After setting" +#~ " it up you can create a virtual" +#~ " environment with:" +#~ msgstr "" + +#~ msgid "The :code:`SecAgg+` abstraction" +#~ msgstr "" + +#~ msgid "The :code:`LightSecAgg` abstraction" +#~ msgstr "" + +#~ msgid "" +#~ "A fork is a personal copy of " +#~ "a GitHub repository. To create one " +#~ "for Flower, you must navigate to " +#~ "``_ (while connected " +#~ "to your GitHub account) and click " +#~ "the ``Fork`` button situated on the " +#~ "top right of the page." +#~ msgstr "" + +#~ msgid "" +#~ "To check which files have been " +#~ "modified compared to the last version" +#~ " (last commit) and to see which " +#~ "files are staged for commit, you " +#~ "can use the :code:`git status` command." +#~ msgstr "" + +#~ msgid "" +#~ "Once you have added all the files" +#~ " you wanted to commit using " +#~ ":code:`git add`, you can finally create" +#~ " your commit using this command:" +#~ msgstr "" + +#~ msgid "" +#~ "The \\ is there to " +#~ "explain to others what the commit " +#~ "does. It should be written in an" +#~ " imperative style and be concise. An" +#~ " example would be :code:`git commit " +#~ "-m \"Add images to README\"`." +#~ msgstr "" + +#~ msgid "" +#~ ":doc:`Good first contributions `, where you" +#~ " should particularly look into the " +#~ ":code:`baselines` contributions." +#~ msgstr "" + +#~ msgid "" +#~ "Flower uses :code:`pyproject.toml` to manage" +#~ " dependencies and configure development " +#~ "tools (the ones which support it). " +#~ "Poetry is a build tool which " +#~ "supports `PEP 517 " +#~ "`_." 
+#~ msgstr "" + +#~ msgid "" +#~ "Install `xz` (to install different " +#~ "Python versions) and `pandoc` to build" +#~ " the docs::" +#~ msgstr "" + +#~ msgid "" +#~ "Ensure you system (Ubuntu 22.04+) is " +#~ "up-to-date, and you have all " +#~ "necessary packages::" +#~ msgstr "" + +#~ msgid "" +#~ "1. Clone the `Flower repository " +#~ "`_ from GitHub::" +#~ msgstr "" + +#~ msgid "" +#~ "Let's create the Python environment for" +#~ " all-things Flower. If you wish " +#~ "to use :code:`pyenv`, we provide two " +#~ "convenience scripts that you can use." +#~ " If you prefer using something else" +#~ " than :code:`pyenv`, create a new " +#~ "environment, activate and skip to the" +#~ " last point where all packages are" +#~ " installed." +#~ msgstr "" + +#~ msgid "" +#~ "If you don't have :code:`pyenv` " +#~ "installed, the following script that " +#~ "will install it, set it up, and" +#~ " create the virtual environment (with " +#~ ":code:`Python 3.9.20` by default)::" +#~ msgstr "" + +#~ msgid "" +#~ "If you already have :code:`pyenv` " +#~ "installed (along with the :code:`pyenv-" +#~ "virtualenv` plugin), you can use the " +#~ "following convenience script (with " +#~ ":code:`Python 3.9.20` by default)::" +#~ msgstr "" + +#~ msgid "" +#~ "3. Install the Flower package in " +#~ "development mode (think :code:`pip install " +#~ "-e`) along with all necessary " +#~ "dependencies::" +#~ msgstr "" + +#~ msgid "" +#~ "The Flower repository contains a number" +#~ " of convenience scripts to make " +#~ "recurring development tasks easier and " +#~ "less error-prone. See the :code:`/dev`" +#~ " subdirectory for a full list. The" +#~ " following scripts are amongst the " +#~ "most important ones:" +#~ msgstr "" + +#~ msgid "" +#~ "If in a hurry, bypass the hook " +#~ "using ``--no-verify`` with the ``git " +#~ "commit`` command. 
::" +#~ msgstr "" + +#~ msgid "" +#~ "Developers could run the full set " +#~ "of Github Actions workflows under their" +#~ " local environment by using `Act " +#~ "`_. Please refer to" +#~ " the installation instructions under the" +#~ " linked repository and run the next" +#~ " command under Flower main cloned " +#~ "repository folder::" +#~ msgstr "" + +#~ msgid "" +#~ "Flower uses Poetry to build releases." +#~ " The necessary command is wrapped in" +#~ " a simple script::" +#~ msgstr "" + +#~ msgid "" +#~ "The resulting :code:`.whl` and :code:`.tar.gz`" +#~ " releases will be stored in the " +#~ ":code:`/dist` subdirectory." +#~ msgstr "" + +#~ msgid "" +#~ "Flower's documentation uses `Sphinx " +#~ "`_. There's no " +#~ "convenience script to re-build the " +#~ "documentation yet, but it's pretty " +#~ "easy::" +#~ msgstr "" + +#~ msgid "" +#~ "Some quickstart examples may have " +#~ "limitations or requirements that prevent " +#~ "them from running on every environment." +#~ " For more information, please see " +#~ "`Limitations`_." +#~ msgstr "" + +#~ msgid "" +#~ "Change the application code. For " +#~ "example, change the ``seed`` in " +#~ "``quickstart_docker/task.py`` to ``43`` and " +#~ "save it:" +#~ msgstr "" + +#~ msgid "" +#~ "All files are revised based on " +#~ ":doc:`Example: PyTorch - From Centralized " +#~ "To Federated `. The only thing" +#~ " to do is modifying the file " +#~ "called :code:`cifar.py`, revised part is " +#~ "shown below:" +#~ msgstr "" + +#~ msgid "" +#~ "If you have read :doc:`Example: PyTorch" +#~ " - From Centralized To Federated " +#~ "`, the following parts are " +#~ "easy to follow, only :code:`get_parameters`" +#~ " and :code:`set_parameters` function in " +#~ ":code:`client.py` needed to revise. If " +#~ "not, please read the :doc:`Example: " +#~ "PyTorch - From Centralized To Federated" +#~ " `. first." +#~ msgstr "" + +#~ msgid "" +#~ "Our example consists of one *server* " +#~ "and two *clients*. 
In FedBN, " +#~ ":code:`server.py` keeps unchanged, we can " +#~ "start the server directly." +#~ msgstr "" + +#~ msgid "" +#~ "Finally, we will revise our *client* " +#~ "logic by changing :code:`get_parameters` and" +#~ " :code:`set_parameters` in :code:`client.py`, we" +#~ " will exclude batch normalization " +#~ "parameters from model parameter list " +#~ "when sending to or receiving from " +#~ "the server." +#~ msgstr "" + +#~ msgid "" +#~ "Let's create a new file called " +#~ ":code:`cifar.py` with all the components " +#~ "required for a traditional (centralized) " +#~ "training on CIFAR-10. First, all " +#~ "required packages (such as :code:`torch` " +#~ "and :code:`torchvision`) need to be " +#~ "imported. You can see that we do" +#~ " not import any package for federated" +#~ " learning. You can keep all these " +#~ "imports as they are even when we" +#~ " add the federated learning components " +#~ "at a later point." +#~ msgstr "" + +#~ msgid "" +#~ "As already mentioned we will use " +#~ "the CIFAR-10 dataset for this machine" +#~ " learning workload. The model architecture" +#~ " (a very simple Convolutional Neural " +#~ "Network) is defined in :code:`class " +#~ "Net()`." +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`load_data()` function loads the " +#~ "CIFAR-10 training and test sets. The " +#~ ":code:`transform` normalized the data after" +#~ " loading." +#~ msgstr "" + +#~ msgid "" +#~ "We now need to define the training" +#~ " (function :code:`train()`) which loops " +#~ "over the training set, measures the " +#~ "loss, backpropagates it, and then takes" +#~ " one optimizer step for each batch" +#~ " of training examples." +#~ msgstr "" + +#~ msgid "" +#~ "The evaluation of the model is " +#~ "defined in the function :code:`test()`. " +#~ "The function loops over all test " +#~ "samples and measures the loss of " +#~ "the model based on the test " +#~ "dataset." +#~ msgstr "" + +#~ msgid "" +#~ "The concept is easy to understand. 
" +#~ "We have to start a *server* and" +#~ " then use the code in " +#~ ":code:`cifar.py` for the *clients* that " +#~ "are connected to the *server*. The " +#~ "*server* sends model parameters to the" +#~ " clients. The *clients* run the " +#~ "training and update the parameters. The" +#~ " updated parameters are sent back to" +#~ " the *server* which averages all " +#~ "received parameter updates. This describes " +#~ "one round of the federated learning " +#~ "process and we repeat this for " +#~ "multiple rounds." +#~ msgstr "" + +#~ msgid "" +#~ "Our example consists of one *server* " +#~ "and two *clients*. Let's set up " +#~ ":code:`server.py` first. The *server* needs" +#~ " to import the Flower package " +#~ ":code:`flwr`. Next, we use the " +#~ ":code:`start_server` function to start a " +#~ "server and tell it to perform " +#~ "three rounds of federated learning." +#~ msgstr "" + +#~ msgid "" +#~ "Finally, we will define our *client* " +#~ "logic in :code:`client.py` and build " +#~ "upon the previously defined centralized " +#~ "training in :code:`cifar.py`. Our *client* " +#~ "needs to import :code:`flwr`, but also" +#~ " :code:`torch` to update the parameters " +#~ "on our PyTorch model:" +#~ msgstr "" + +#~ msgid "" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " :code:`flwr.client.Client` or " +#~ ":code:`flwr.client.NumPyClient`. Our implementation " +#~ "will be based on " +#~ ":code:`flwr.client.NumPyClient` and we'll call " +#~ "it :code:`CifarClient`. :code:`NumPyClient` is " +#~ "slightly easier to implement than " +#~ ":code:`Client` if you use a framework" +#~ " with good NumPy interoperability (like " +#~ "PyTorch or TensorFlow/Keras) because it " +#~ "avoids some of the boilerplate that " +#~ "would otherwise be necessary. 
" +#~ ":code:`CifarClient` needs to implement four" +#~ " methods, two methods for getting/setting" +#~ " model parameters, one method for " +#~ "training the model, and one method " +#~ "for testing the model:" +#~ msgstr "" + +#~ msgid ":code:`set_parameters`" +#~ msgstr "" + +#~ msgid "" +#~ "loop over the list of model " +#~ "parameters received as NumPy :code:`ndarray`'s" +#~ " (think list of neural network " +#~ "layers)" +#~ msgstr "" + +#~ msgid ":code:`get_parameters`" +#~ msgstr "" + +#~ msgid "" +#~ "get the model parameters and return " +#~ "them as a list of NumPy " +#~ ":code:`ndarray`'s (which is what " +#~ ":code:`flwr.client.NumPyClient` expects)" +#~ msgstr "" + +#~ msgid ":code:`fit`" +#~ msgstr "" + +#~ msgid ":code:`evaluate`" +#~ msgstr "" + +#~ msgid "" +#~ "The two :code:`NumPyClient` methods " +#~ ":code:`fit` and :code:`evaluate` make use " +#~ "of the functions :code:`train()` and " +#~ ":code:`test()` previously defined in " +#~ ":code:`cifar.py`. So what we really do" +#~ " here is we tell Flower through " +#~ "our :code:`NumPyClient` subclass which of " +#~ "our already defined functions to call" +#~ " for training and evaluation. We " +#~ "included type annotations to give you" +#~ " a better understanding of the data" +#~ " types that get passed around." +#~ msgstr "" + +#~ msgid "" +#~ "All that's left to do it to " +#~ "define a function that loads both " +#~ "model and data, creates a " +#~ ":code:`CifarClient`, and starts this client." +#~ " You load your data and model " +#~ "by using :code:`cifar.py`. 
Start " +#~ ":code:`CifarClient` with the function " +#~ ":code:`fl.client.start_client()` by pointing it " +#~ "at the same IP address we used " +#~ "in :code:`server.py`:" +#~ msgstr "" + +#~ msgid "" +#~ "\\small\n" +#~ "\\frac{∆ \\times \\sqrt{2 \\times " +#~ "\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +#~ "\n" +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`Strategy` abstraction provides a" +#~ " method called :code:`evaluate` that can" +#~ " directly be used to evaluate the " +#~ "current global model parameters. The " +#~ "current server implementation calls " +#~ ":code:`evaluate` after parameter aggregation " +#~ "and before federated evaluation (see " +#~ "next paragraph)." +#~ msgstr "" + +#~ msgid "" +#~ "Client-side evaluation happens in the" +#~ " :code:`Client.evaluate` method and can be" +#~ " configured from the server side." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`fraction_evaluate`: a :code:`float` defining" +#~ " the fraction of clients that will" +#~ " be selected for evaluation. If " +#~ ":code:`fraction_evaluate` is set to " +#~ ":code:`0.1` and :code:`100` clients are " +#~ "connected to the server, then :code:`10`" +#~ " will be randomly selected for " +#~ "evaluation. If :code:`fraction_evaluate` is " +#~ "set to :code:`0.0`, federated evaluation " +#~ "will be disabled." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`min_evaluate_clients`: an :code:`int`: the" +#~ " minimum number of clients to be " +#~ "selected for evaluation. If " +#~ ":code:`fraction_evaluate` is set to " +#~ ":code:`0.1`, :code:`min_evaluate_clients` is set " +#~ "to 20, and :code:`100` clients are " +#~ "connected to the server, then :code:`20`" +#~ " clients will be selected for " +#~ "evaluation." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`min_available_clients`: an :code:`int` that" +#~ " defines the minimum number of " +#~ "clients which need to be connected " +#~ "to the server before a round of" +#~ " federated evaluation can start. 
If " +#~ "fewer than :code:`min_available_clients` are " +#~ "connected to the server, the server " +#~ "will wait until more clients are " +#~ "connected before it continues to sample" +#~ " clients for evaluation." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`on_evaluate_config_fn`: a function that " +#~ "returns a configuration dictionary which " +#~ "will be sent to the selected " +#~ "clients. The function will be called " +#~ "during each round and provides a " +#~ "convenient way to customize client-side" +#~ " evaluation from the server side, for" +#~ " example, to configure the number of" +#~ " validation steps performed." +#~ msgstr "" + +#~ msgid "" +#~ "Model parameters can also be evaluated" +#~ " during training. :code:`Client.fit` can " +#~ "return arbitrary evaluation results as a" +#~ " dictionary:" +#~ msgstr "" + +#~ msgid "" +#~ "The same :code:`Strategy`-customization approach " +#~ "can be used to aggregate custom " +#~ "evaluation results coming from individual " +#~ "clients. Clients can return custom " +#~ "metrics to the server by returning " +#~ "a dictionary:" +#~ msgstr "" + +#~ msgid "Enable node authentication in :code:`SuperLink`" +#~ msgstr "" + +#~ msgid "" +#~ "To enable node authentication, first you" +#~ " need to configure SSL/TLS connections " +#~ "to secure the SuperLink<>SuperNode " +#~ "communication. You can find the complete" +#~ " guide `here `_. After " +#~ "configuring secure connections, you can " +#~ "enable client authentication in a " +#~ "long-running Flower :code:`SuperLink`. Use " +#~ "the following terminal command to start" +#~ " a Flower :code:`SuperNode` that has " +#~ "both secure connections and node " +#~ "authentication enabled:" +#~ msgstr "" + +#~ msgid "" +#~ "The first flag :code:`--auth-list-" +#~ "public-keys` expects a path to a " +#~ "CSV file storing all known node " +#~ "public keys. 
You need to store all" +#~ " known node public keys that are " +#~ "allowed to participate in a federation" +#~ " in one CSV file (:code:`.csv`)." +#~ msgstr "" + +#~ msgid "" +#~ "The second and third flags :code" +#~ ":`--auth-superlink-private-key` and :code" +#~ ":`--auth-superlink-public-key` expect paths" +#~ " to the server's private and public" +#~ " keys. For development purposes, you " +#~ "can generate a private and public " +#~ "key pair using :code:`ssh-keygen -t " +#~ "ecdsa -b 384`." +#~ msgstr "" + +#~ msgid "Enable node authentication in :code:`SuperNode`" +#~ msgstr "" + +#~ msgid "" +#~ "Similar to the long-running Flower " +#~ "server (:code:`SuperLink`), you can easily " +#~ "enable node authentication in the " +#~ "long-running Flower client (:code:`SuperNode`)." +#~ " Use the following terminal command " +#~ "to start an authenticated :code:`SuperNode`:" +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`--auth-supernode-private-key` " +#~ "flag expects a path to the node's" +#~ " private key file and the :code" +#~ ":`--auth-supernode-public-key` flag expects" +#~ " a path to the node's public " +#~ "key file. For development purposes, you" +#~ " can generate a private and public" +#~ " key pair using :code:`ssh-keygen -t" +#~ " ecdsa -b 384`." +#~ msgstr "" + +#~ msgid "" +#~ "You should now have learned how to" +#~ " start a long-running Flower server" +#~ " (:code:`SuperLink`) and client " +#~ "(:code:`SuperNode`) with node authentication " +#~ "enabled. You should also know the " +#~ "significance of the private key and " +#~ "store it safely to minimize security " +#~ "risks." +#~ msgstr "" + +#~ msgid "" +#~ "The easiest way to send configuration" +#~ " values to clients is to use a" +#~ " built-in strategy like :code:`FedAvg`. " +#~ "Built-in strategies support so-called" +#~ " configuration functions. 
A configuration " +#~ "function is a function that the " +#~ "built-in strategy calls to get the" +#~ " configuration dictionary for the current" +#~ " round. It then forwards the " +#~ "configuration dictionary to all the " +#~ "clients selected during that round." +#~ msgstr "" + +#~ msgid "" +#~ "To make the built-in strategies " +#~ "use this function, we can pass it" +#~ " to ``FedAvg`` during initialization using" +#~ " the parameter :code:`on_fit_config_fn`:" +#~ msgstr "" + +#~ msgid "The :code:`FedAvg` strategy will call this function *every round*." +#~ msgstr "" + +#~ msgid "" +#~ "This can be achieved by customizing " +#~ "an existing strategy or by " +#~ ":doc:`implementing a custom strategy from " +#~ "scratch `. " +#~ "Here's a nonsensical example that " +#~ "customizes :code:`FedAvg` by adding a " +#~ "custom ``\"hello\": \"world\"`` configuration " +#~ "key/value pair to the config dict " +#~ "of a *single client* (only the " +#~ "first client in the list, the " +#~ "other clients in this round to not" +#~ " receive this \"special\" config value):" +#~ msgstr "" + +#~ msgid "" +#~ "containing relevant information including: log" +#~ " message level (e.g. :code:`INFO`, " +#~ ":code:`DEBUG`), a timestamp, the line " +#~ "where the logging took place from, " +#~ "as well as the log message itself." +#~ " In this way, the logger would " +#~ "typically display information on your " +#~ "terminal as follows:" +#~ msgstr "" + +#~ msgid "" +#~ "By default, the Flower log is " +#~ "outputted to the terminal where you " +#~ "launch your Federated Learning workload " +#~ "from. This applies for both gRPC-" +#~ "based federation (i.e. when you do " +#~ ":code:`fl.server.start_server`) and when using " +#~ "the :code:`VirtualClientEngine` (i.e. when you" +#~ " do :code:`fl.simulation.start_simulation`). In " +#~ "some situations you might want to " +#~ "save this log to disk. 
You can " +#~ "do so by calling the " +#~ "`fl.common.logger.configure() " +#~ "`_" +#~ " function. For example:" +#~ msgstr "" + +#~ msgid "" +#~ "With the above, Flower will record " +#~ "the log you see on your terminal" +#~ " to :code:`log.txt`. This file will " +#~ "be created in the same directory " +#~ "as were you are running the code" +#~ " from. If we inspect we see the" +#~ " log above is also recorded but " +#~ "prefixing with :code:`identifier` each line:" +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`fl.common.logger.configure` function, " +#~ "also allows specifying a host to " +#~ "which logs can be pushed (via " +#~ ":code:`POST`) through a native Python " +#~ ":code:`logging.handler.HTTPHandler`. This is a " +#~ "particularly useful feature in " +#~ ":code:`gRPC`-based Federated Learning workloads " +#~ "where otherwise gathering logs from all" +#~ " entities (i.e. the server and the" +#~ " clients) might be cumbersome. Note " +#~ "that in Flower simulation, the server" +#~ " automatically displays all logs. You " +#~ "can still specify a :code:`HTTPHandler` " +#~ "should you wish to backup or " +#~ "analyze the logs somewhere else." +#~ msgstr "" + +#~ msgid "" +#~ "This guide describes how to a " +#~ "SSL-enabled secure Flower server " +#~ "(:code:`SuperLink`) can be started and " +#~ "how a Flower client (:code:`SuperNode`) " +#~ "can establish a secure connections to" +#~ " it." +#~ msgstr "" + +#~ msgid "" +#~ "The code example comes with a " +#~ ":code:`README.md` file which explains how " +#~ "to start it. Although it is " +#~ "already SSL-enabled, it might be " +#~ "less descriptive on how it does " +#~ "so. Stick to this guide for a " +#~ "deeper introduction to the topic." +#~ msgstr "" + +#~ msgid "" +#~ "Using SSL-enabled connections requires " +#~ "certificates to be passed to the " +#~ "server and client. For the purpose " +#~ "of this guide we are going to " +#~ "generate self-signed certificates. 
As " +#~ "this can become quite complex we " +#~ "are going to ask you to run " +#~ "the script in :code:`examples/advanced-" +#~ "tensorflow/certificates/generate.sh` with the " +#~ "following command sequence:" +#~ msgstr "" + +#~ msgid "" +#~ "This will generate the certificates in" +#~ " :code:`examples/advanced-tensorflow/.cache/certificates`." +#~ msgstr "" + +#~ msgid "" +#~ "When setting :code:`root_certificates`, the " +#~ "client expects a file path to " +#~ "PEM-encoded root certificates." +#~ msgstr "" + +#~ msgid "The :code:`Strategy` abstraction" +#~ msgstr "" + +#~ msgid "" +#~ "All strategy implementation are derived " +#~ "from the abstract base class " +#~ ":code:`flwr.server.strategy.Strategy`, both built-in" +#~ " implementations and third party " +#~ "implementations. This means that custom " +#~ "strategy implementations have the exact " +#~ "same capabilities at their disposal as" +#~ " built-in ones." +#~ msgstr "" + +#~ msgid "" +#~ "Creating a new strategy means " +#~ "implementing a new :code:`class` (derived " +#~ "from the abstract base class " +#~ ":code:`Strategy`) that implements for the " +#~ "previously shown abstract methods:" +#~ msgstr "" + +#~ msgid "The :code:`initialize_parameters` method" +#~ msgstr "" + +#~ msgid "" +#~ ":code:`initialize_parameters` is called only " +#~ "once, at the very beginning of an" +#~ " execution. It is responsible for " +#~ "providing the initial global model " +#~ "parameters in a serialized form (i.e.," +#~ " as a :code:`Parameters` object)." +#~ msgstr "" + +#~ msgid "" +#~ "Built-in strategies return user-provided" +#~ " initial parameters. The following example" +#~ " shows how initial parameters can be" +#~ " passed to :code:`FedAvg`:" +#~ msgstr "" + +#~ msgid "" +#~ "The Flower server will call " +#~ ":code:`initialize_parameters`, which either returns" +#~ " the parameters that were passed to" +#~ " :code:`initial_parameters`, or :code:`None`. 
If" +#~ " no parameters are returned from " +#~ ":code:`initialize_parameters` (i.e., :code:`None`), " +#~ "the server will randomly select one " +#~ "client and ask it to provide its" +#~ " parameters. This is a convenience " +#~ "feature and not recommended in practice," +#~ " but it can be useful for " +#~ "prototyping. In practice, it is " +#~ "recommended to always use server-side" +#~ " parameter initialization." +#~ msgstr "" + +#~ msgid "The :code:`configure_fit` method" +#~ msgstr "" + +#~ msgid "" +#~ ":code:`configure_fit` is responsible for " +#~ "configuring the upcoming round of " +#~ "training. What does *configure* mean in" +#~ " this context? Configuring a round " +#~ "means selecting clients and deciding " +#~ "what instructions to send to these " +#~ "clients. The signature of " +#~ ":code:`configure_fit` makes this clear:" +#~ msgstr "" + +#~ msgid "" +#~ "The return value is a list of " +#~ "tuples, each representing the instructions " +#~ "that will be sent to a particular" +#~ " client. Strategy implementations usually " +#~ "perform the following steps in " +#~ ":code:`configure_fit`:" +#~ msgstr "" + +#~ msgid "" +#~ "Use the :code:`client_manager` to randomly " +#~ "sample all (or a subset of) " +#~ "available clients (each represented as a" +#~ " :code:`ClientProxy` object)" +#~ msgstr "" + +#~ msgid "" +#~ "Pair each :code:`ClientProxy` with the " +#~ "same :code:`FitIns` holding the current " +#~ "global model :code:`parameters` and " +#~ ":code:`config` dict" +#~ msgstr "" + +#~ msgid "" +#~ "More sophisticated implementations can use " +#~ ":code:`configure_fit` to implement custom " +#~ "client selection logic. A client will" +#~ " only participate in a round if " +#~ "the corresponding :code:`ClientProxy` is " +#~ "included in the list returned from " +#~ ":code:`configure_fit`." +#~ msgstr "" + +#~ msgid "" +#~ "The structure of this return value " +#~ "provides a lot of flexibility to " +#~ "the user. 
Since instructions are defined" +#~ " on a per-client basis, different " +#~ "instructions can be sent to each " +#~ "client. This enables custom strategies " +#~ "to train, for example, different models" +#~ " on different clients, or use " +#~ "different hyperparameters on different clients" +#~ " (via the :code:`config` dict)." +#~ msgstr "" + +#~ msgid "The :code:`aggregate_fit` method" +#~ msgstr "" + +#~ msgid "" +#~ ":code:`aggregate_fit` is responsible for " +#~ "aggregating the results returned by the" +#~ " clients that were selected and asked" +#~ " to train in :code:`configure_fit`." +#~ msgstr "" + +#~ msgid "" +#~ "Of course, failures can happen, so " +#~ "there is no guarantee that the " +#~ "server will get results from all " +#~ "the clients it sent instructions to " +#~ "(via :code:`configure_fit`). :code:`aggregate_fit` " +#~ "therefore receives a list of " +#~ ":code:`results`, but also a list of " +#~ ":code:`failures`." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`aggregate_fit` returns an optional " +#~ ":code:`Parameters` object and a dictionary " +#~ "of aggregated metrics. The :code:`Parameters`" +#~ " return value is optional because " +#~ ":code:`aggregate_fit` might decide that the" +#~ " results provided are not sufficient " +#~ "for aggregation (e.g., too many " +#~ "failures)." +#~ msgstr "" + +#~ msgid "The :code:`configure_evaluate` method" +#~ msgstr "" + +#~ msgid "" +#~ ":code:`configure_evaluate` is responsible for " +#~ "configuring the upcoming round of " +#~ "evaluation. What does *configure* mean " +#~ "in this context? Configuring a round " +#~ "means selecting clients and deciding " +#~ "what instructions to send to these " +#~ "clients. The signature of " +#~ ":code:`configure_evaluate` makes this clear:" +#~ msgstr "" + +#~ msgid "" +#~ "The return value is a list of " +#~ "tuples, each representing the instructions " +#~ "that will be sent to a particular" +#~ " client. 
Strategy implementations usually " +#~ "perform the following steps in " +#~ ":code:`configure_evaluate`:" +#~ msgstr "" + +#~ msgid "" +#~ "Pair each :code:`ClientProxy` with the " +#~ "same :code:`EvaluateIns` holding the current" +#~ " global model :code:`parameters` and " +#~ ":code:`config` dict" +#~ msgstr "" + +#~ msgid "" +#~ "More sophisticated implementations can use " +#~ ":code:`configure_evaluate` to implement custom " +#~ "client selection logic. A client will" +#~ " only participate in a round if " +#~ "the corresponding :code:`ClientProxy` is " +#~ "included in the list returned from " +#~ ":code:`configure_evaluate`." +#~ msgstr "" + +#~ msgid "" +#~ "The structure of this return value " +#~ "provides a lot of flexibility to " +#~ "the user. Since instructions are defined" +#~ " on a per-client basis, different " +#~ "instructions can be sent to each " +#~ "client. This enables custom strategies " +#~ "to evaluate, for example, different " +#~ "models on different clients, or use " +#~ "different hyperparameters on different clients" +#~ " (via the :code:`config` dict)." +#~ msgstr "" + +#~ msgid "The :code:`aggregate_evaluate` method" +#~ msgstr "" + +#~ msgid "" +#~ ":code:`aggregate_evaluate` is responsible for " +#~ "aggregating the results returned by the" +#~ " clients that were selected and asked" +#~ " to evaluate in :code:`configure_evaluate`." +#~ msgstr "" + +#~ msgid "" +#~ "Of course, failures can happen, so " +#~ "there is no guarantee that the " +#~ "server will get results from all " +#~ "the clients it sent instructions to " +#~ "(via :code:`configure_evaluate`). " +#~ ":code:`aggregate_evaluate` therefore receives a " +#~ "list of :code:`results`, but also a " +#~ "list of :code:`failures`." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`aggregate_evaluate` returns an optional " +#~ ":code:`float` (loss) and a dictionary of" +#~ " aggregated metrics. 
The :code:`float` " +#~ "return value is optional because " +#~ ":code:`aggregate_evaluate` might decide that " +#~ "the results provided are not sufficient" +#~ " for aggregation (e.g., too many " +#~ "failures)." +#~ msgstr "" + +#~ msgid "The :code:`evaluate` method" +#~ msgstr "" + +#~ msgid "" +#~ ":code:`evaluate` is responsible for evaluating" +#~ " model parameters on the server-side." +#~ " Having :code:`evaluate` in addition to " +#~ ":code:`configure_evaluate`/:code:`aggregate_evaluate` enables" +#~ " strategies to perform both servers-" +#~ "side and client-side (federated) " +#~ "evaluation." +#~ msgstr "" + +#~ msgid "" +#~ "The return value is again optional " +#~ "because the strategy might not need " +#~ "to implement server-side evaluation or" +#~ " because the user-defined :code:`evaluate`" +#~ " method might not complete successfully " +#~ "(e.g., it might fail to load the" +#~ " server-side evaluation data)." +#~ msgstr "" + +#~ msgid "" +#~ "Stable releases are available on `PyPI" +#~ " `_::" +#~ msgstr "" + +#~ msgid "" +#~ "For simulations that use the Virtual " +#~ "Client Engine, ``flwr`` should be " +#~ "installed with the ``simulation`` extra::" +#~ msgstr "" + +#~ msgid "" +#~ "If you have not added ``conda-" +#~ "forge`` to your channels, you will " +#~ "first need to run the following::" +#~ msgstr "" + +#~ msgid "" +#~ "Once the ``conda-forge`` channel has " +#~ "been enabled, ``flwr`` can be installed" +#~ " with ``conda``::" +#~ msgstr "" + +#~ msgid "or with ``mamba``::" +#~ msgstr "" + +#~ msgid "" +#~ "New (possibly unstable) versions of " +#~ "Flower are sometimes available as " +#~ "pre-release versions (alpha, beta, release" +#~ " candidate) before the stable release " +#~ "happens::" +#~ msgstr "" + +#~ msgid "" +#~ "For simulations that use the Virtual " +#~ "Client Engine, ``flwr`` pre-releases " +#~ "should be installed with the " +#~ "``simulation`` extra::" +#~ msgstr "" + +#~ msgid "" +#~ "The latest (potentially 
unstable) changes " +#~ "in Flower are available as nightly " +#~ "releases::" +#~ msgstr "" + +#~ msgid "" +#~ "For simulations that use the Virtual " +#~ "Client Engine, ``flwr-nightly`` should " +#~ "be installed with the ``simulation`` " +#~ "extra::" +#~ msgstr "" + +#~ msgid "You can look at everything at ``_ ." +#~ msgstr "" + +#~ msgid "" +#~ "After you finish the visualization, stop" +#~ " Prometheus and Grafana. This is " +#~ "important as they will otherwise block," +#~ " for example port :code:`3000` on " +#~ "your machine as long as they are" +#~ " running." +#~ msgstr "" + +#~ msgid "" +#~ "In the example above, only one " +#~ "client will be run, so your " +#~ "clients won't run concurrently. Setting " +#~ ":code:`client_num_gpus = 0.5` would allow " +#~ "running two clients and therefore enable" +#~ " them to run concurrently. Be careful" +#~ " not to require more resources than" +#~ " available. If you specified " +#~ ":code:`client_num_gpus = 2`, the simulation" +#~ " wouldn't start (even if you had " +#~ "2 GPUs but decided to set 1 " +#~ "in :code:`ray_init_args`)." +#~ msgstr "" + +#~ msgid "" +#~ "Q: I see \"This site can't be " +#~ "reached\" when going to " +#~ "``_." +#~ msgstr "" + +#~ msgid "" +#~ "Ray Dashboard: ``_" +#~ msgstr "" + +#~ msgid "Ray Metrics: ``_" +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`VirtualClientEngine` schedules, launches" +#~ " and manages `virtual` clients. These " +#~ "clients are identical to `non-virtual`" +#~ " clients (i.e. the ones you launch" +#~ " via the command `flwr.client.start_client " +#~ "`_) in the" +#~ " sense that they can be configure " +#~ "by creating a class inheriting, for " +#~ "example, from `flwr.client.NumPyClient `_ and therefore" +#~ " behave in an identical way. 
In " +#~ "addition to that, clients managed by " +#~ "the :code:`VirtualClientEngine` are:" +#~ msgstr "" + +#~ msgid "" +#~ "self-managed: this means that you " +#~ "as a user do not need to " +#~ "launch clients manually, instead this " +#~ "gets delegated to :code:`VirtualClientEngine`'s " +#~ "internals." +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`VirtualClientEngine` implements `virtual`" +#~ " clients using `Ray `_, " +#~ "an open-source framework for scalable" +#~ " Python workloads. In particular, Flower's" +#~ " :code:`VirtualClientEngine` makes use of " +#~ "`Actors `_ to spawn `virtual` clients" +#~ " and run their workload." +#~ msgstr "" + +#~ msgid "" +#~ "By default the VCE has access to" +#~ " all system resources (i.e. all CPUs," +#~ " all GPUs, etc) since that is " +#~ "also the default behavior when starting" +#~ " Ray. However, in some settings you" +#~ " might want to limit how many " +#~ "of your system resources are used " +#~ "for simulation. You can do this " +#~ "via the :code:`ray_init_args` input argument" +#~ " to :code:`start_simulation` which the VCE" +#~ " internally passes to Ray's " +#~ ":code:`ray.init` command. For a complete " +#~ "list of settings you can configure " +#~ "check the `ray.init `_ " +#~ "documentation. Do not set " +#~ ":code:`ray_init_args` if you want the " +#~ "VCE to use all your system's CPUs" +#~ " and GPUs." +#~ msgstr "" + +#~ msgid "" +#~ "By default the :code:`VirtualClientEngine` " +#~ "assigns a single CPU core (and " +#~ "nothing else) to each virtual client." +#~ " This means that if your system " +#~ "has 10 cores, that many virtual " +#~ "clients can be concurrently running." +#~ msgstr "" + +#~ msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`num_gpus` indicates the **ratio** of" +#~ " GPU memory a client gets assigned." 
+#~ msgstr "" + +#~ msgid "" +#~ "While the :code:`client_resources` can be " +#~ "used to control the degree of " +#~ "concurrency in your FL simulation, this" +#~ " does not stop you from running " +#~ "dozens, hundreds or even thousands of" +#~ " clients in the same round and " +#~ "having orders of magnitude more " +#~ "`dormant` (i.e. not participating in a" +#~ " round) clients. Let's say you want" +#~ " to have 100 clients per round " +#~ "but your system can only accommodate " +#~ "8 clients concurrently. The " +#~ ":code:`VirtualClientEngine` will schedule 100 " +#~ "jobs to run (each simulating a " +#~ "client sampled by the strategy) and " +#~ "then will execute them in a " +#~ "resource-aware manner in batches of " +#~ "8." +#~ msgstr "" + +#~ msgid "" +#~ "Flower's :code:`VirtualClientEngine` allows you " +#~ "to run FL simulations across multiple" +#~ " compute nodes. Before starting your " +#~ "multi-node simulation ensure that you:" +#~ msgstr "" + +#~ msgid "" +#~ "Pass :code:`ray_init_args={\"address\"=\"auto\"}` to " +#~ "`start_simulation `_ so the " +#~ ":code:`VirtualClientEngine` attaches to a " +#~ "running Ray instance." +#~ msgstr "" + +#~ msgid "" +#~ "Start Ray on you head node: on " +#~ "the terminal type :code:`ray start " +#~ "--head`. This command will print a " +#~ "few lines, one of which indicates " +#~ "how to attach other nodes to the" +#~ " head node." +#~ msgstr "" + +#~ msgid "" +#~ "Attach other nodes to the head " +#~ "node: copy the command shown after " +#~ "starting the head and execute it " +#~ "on terminal of a new node: for " +#~ "example :code:`ray start " +#~ "--address='192.168.1.132:6379'`" +#~ msgstr "" + +#~ msgid "" +#~ "Once your simulation is finished, if " +#~ "you'd like to dismantle your cluster " +#~ "you simply need to run the command" +#~ " :code:`ray stop` in each node's " +#~ "terminal (including the head node)." 
+#~ msgstr "" + +#~ msgid "" +#~ "User :code:`ray status` to check all " +#~ "nodes connected to your head node " +#~ "as well as the total resources " +#~ "available to the :code:`VirtualClientEngine`." +#~ msgstr "" + +#~ msgid "" +#~ "When attaching a new node to the" +#~ " head, all its resources (i.e. all" +#~ " CPUs, all GPUs) will be visible " +#~ "by the head node. This means that" +#~ " the :code:`VirtualClientEngine` can schedule " +#~ "as many `virtual` clients as that " +#~ "node can possible run. In some " +#~ "settings you might want to exclude " +#~ "certain resources from the simulation. " +#~ "You can do this by appending " +#~ "`--num-cpus=` and/or `--num-" +#~ "gpus=` in any :code:`ray " +#~ "start` command (including when starting " +#~ "the head)" +#~ msgstr "" + +#~ msgid "" +#~ "The VCE assigns a share of GPU " +#~ "memory to a client that specifies " +#~ "the key :code:`num_gpus` in " +#~ ":code:`client_resources`. This being said, Ray" +#~ " (used internally by the VCE) is " +#~ "by default:" +#~ msgstr "" + +#~ msgid "" +#~ "not aware of the total VRAM " +#~ "available on the GPUs. This means " +#~ "that if you set :code:`num_gpus=0.5` and" +#~ " you have two GPUs in your " +#~ "system with different (e.g. 32GB and " +#~ "8GB) VRAM amounts, they both would " +#~ "run 2 clients concurrently." +#~ msgstr "" + +#~ msgid "" +#~ "If you want to run several " +#~ "independent Flower simulations on the " +#~ "same machine you need to mask-out" +#~ " your GPUs with " +#~ ":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching" +#~ " your experiment." +#~ msgstr "" + +#~ msgid "" +#~ "In addition, the GPU resource limits " +#~ "passed to :code:`client_resources` are not " +#~ "`enforced` (i.e. they can be exceeded)" +#~ " which can result in the situation" +#~ " of client using more VRAM than " +#~ "the ratio specified when starting the" +#~ " simulation." 
+#~ msgstr "" + +#~ msgid "" +#~ "This would need to be done in " +#~ "the main process (which is where " +#~ "the server would run) and in each" +#~ " Actor created by the VCE. By " +#~ "means of :code:`actor_kwargs` we can " +#~ "pass the reserved key `\"on_actor_init_fn\"`" +#~ " in order to specify a function " +#~ "to be executed upon actor " +#~ "initialization. In this case, to enable" +#~ " GPU growth for TF workloads. It " +#~ "would look as follows:" +#~ msgstr "" + +#~ msgid "" +#~ "Model updates can be persisted on " +#~ "the server-side by customizing " +#~ ":code:`Strategy` methods. Implementing custom " +#~ "strategies is always an option, but " +#~ "for many cases it may be more " +#~ "convenient to simply customize an " +#~ "existing strategy. The following code " +#~ "example defines a new " +#~ ":code:`SaveModelStrategy` which customized the " +#~ "existing built-in :code:`FedAvg` strategy. " +#~ "In particular, it customizes " +#~ ":code:`aggregate_fit` by calling " +#~ ":code:`aggregate_fit` in the base class " +#~ "(:code:`FedAvg`). It then continues to " +#~ "save returned (aggregated) weights before " +#~ "it returns those aggregated weights to" +#~ " the caller (i.e., the server):" +#~ msgstr "" + +#~ msgid "" +#~ "For central DP with server-side " +#~ "clipping, there are two :code:`Strategy` " +#~ "classes that act as wrappers around " +#~ "the actual :code:`Strategy` instance (for " +#~ "example, :code:`FedAvg`). The two wrapper " +#~ "classes are " +#~ ":code:`DifferentialPrivacyServerSideFixedClipping` and " +#~ ":code:`DifferentialPrivacyServerSideAdaptiveClipping` for " +#~ "fixed and adaptive clipping." +#~ msgstr "" + +#~ msgid "" +#~ "The code sample below enables the " +#~ ":code:`FedAvg` strategy to use server-" +#~ "side fixed clipping using the " +#~ ":code:`DifferentialPrivacyServerSideFixedClipping` wrapper " +#~ "class. 
The same approach can be " +#~ "used with " +#~ ":code:`DifferentialPrivacyServerSideAdaptiveClipping` by " +#~ "adjusting the corresponding input parameters." +#~ msgstr "" + +#~ msgid "" +#~ "For central DP with client-side " +#~ "clipping, the server sends the clipping" +#~ " value to selected clients on each" +#~ " round. Clients can use existing " +#~ "Flower :code:`Mods` to perform the " +#~ "clipping. Two mods are available for " +#~ "fixed and adaptive client-side clipping:" +#~ " :code:`fixedclipping_mod` and " +#~ ":code:`adaptiveclipping_mod` with corresponding " +#~ "server-side wrappers " +#~ ":code:`DifferentialPrivacyClientSideFixedClipping` and " +#~ ":code:`DifferentialPrivacyClientSideAdaptiveClipping`." +#~ msgstr "" + +#~ msgid "" +#~ "The code sample below enables the " +#~ ":code:`FedAvg` strategy to use differential" +#~ " privacy with client-side fixed " +#~ "clipping using both the " +#~ ":code:`DifferentialPrivacyClientSideFixedClipping` wrapper " +#~ "class and, on the client, " +#~ ":code:`fixedclipping_mod`:" +#~ msgstr "" + +#~ msgid "" +#~ "In addition to the server-side " +#~ "strategy wrapper, the :code:`ClientApp` needs" +#~ " to configure the matching " +#~ ":code:`fixedclipping_mod` to perform the " +#~ "client-side clipping:" +#~ msgstr "" + +#~ msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +#~ msgstr "" + +#~ msgid "" +#~ "Flower allows full customization of the" +#~ " learning process through the " +#~ ":code:`Strategy` abstraction. A number of " +#~ "built-in strategies are provided in " +#~ "the core framework." +#~ msgstr "" + +#~ msgid "Use an existing strategy, for example, :code:`FedAvg`" +#~ msgstr "" + +#~ msgid "" +#~ "This creates a strategy with all " +#~ "parameters left at their default values" +#~ " and passes it to the " +#~ ":code:`start_server` function. 
It is usually" +#~ " recommended to adjust a few " +#~ "parameters during instantiation:" +#~ msgstr "" + +#~ msgid "" +#~ "The server can pass new configuration" +#~ " values to the client each round " +#~ "by providing a function to " +#~ ":code:`on_fit_config_fn`. The provided function " +#~ "will be called by the strategy and" +#~ " must return a dictionary of " +#~ "configuration key values pairs that will" +#~ " be sent to the client. It must" +#~ " return a dictionary of arbitrary " +#~ "configuration values :code:`client.fit` and " +#~ ":code:`client.evaluate` functions during each " +#~ "round of federated learning." +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`on_fit_config_fn` can be used " +#~ "to pass arbitrary configuration values " +#~ "from server to client, and potentially" +#~ " change these values each round, for" +#~ " example, to adjust the learning " +#~ "rate. The client will receive the " +#~ "dictionary returned by the " +#~ ":code:`on_fit_config_fn` in its own " +#~ ":code:`client.fit()` function." +#~ msgstr "" + +#~ msgid "" +#~ "Similar to :code:`on_fit_config_fn`, there is" +#~ " also :code:`on_evaluate_config_fn` to customize" +#~ " the configuration sent to " +#~ ":code:`client.evaluate()`" +#~ msgstr "" + +#~ msgid "" +#~ "Server-side evaluation can be enabled" +#~ " by passing an evaluation function to" +#~ " :code:`evaluate_fn`." +#~ msgstr "" + +#~ msgid "" +#~ "Note that since version :code:`1.11.0`, " +#~ ":code:`flower-server-app` no longer " +#~ "supports passing a reference to a " +#~ "`ServerApp` attribute. Instead, you need " +#~ "to pass the path to Flower app " +#~ "via the argument :code:`--app`. This is" +#~ " the path to a directory containing" +#~ " a `pyproject.toml`. You can create a" +#~ " valid Flower app by executing " +#~ ":code:`flwr new` and following the " +#~ "prompt." +#~ msgstr "" + +#~ msgid "" +#~ "The following examples are available as" +#~ " standalone projects. 
Quickstart TensorFlow/Keras" +#~ " ---------------------------" +#~ msgstr "" + +#~ msgid "" +#~ "Let's create a new application project" +#~ " in Xcode and add :code:`flwr` as " +#~ "a dependency in your project. For " +#~ "our application, we will store the " +#~ "logic of our app in " +#~ ":code:`FLiOSModel.swift` and the UI elements" +#~ " in :code:`ContentView.swift`. We will " +#~ "focus more on :code:`FLiOSModel.swift` in " +#~ "this quickstart. Please refer to the " +#~ "`full code example " +#~ "`_ to " +#~ "learn more about the app." +#~ msgstr "" + +#~ msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +#~ msgstr "" + +#~ msgid "" +#~ "Then add the mlmodel to the " +#~ "project simply by drag-and-drop, " +#~ "the mlmodel will be bundled inside " +#~ "the application during deployment to " +#~ "your iOS device. We need to pass" +#~ " the url to access mlmodel and " +#~ "run CoreML machine learning processes, " +#~ "it can be retrieved by calling the" +#~ " function :code:`Bundle.main.url`. For the " +#~ "MNIST dataset, we need to preprocess " +#~ "it into :code:`MLBatchProvider` object. The" +#~ " preprocessing is done inside " +#~ ":code:`DataLoader.swift`." +#~ msgstr "" + +#~ msgid "" +#~ "Since CoreML does not allow the " +#~ "model parameters to be seen before " +#~ "training, and accessing the model " +#~ "parameters during or after the training" +#~ " can only be done by specifying " +#~ "the layer name, we need to know" +#~ " this information beforehand, through " +#~ "looking at the model specification, " +#~ "which are written as proto files. " +#~ "The implementation can be seen in " +#~ ":code:`MLModelInspect`." +#~ msgstr "" + +#~ msgid "" +#~ "Then start the Flower gRPC client " +#~ "and start communicating to the server" +#~ " by passing our Flower client to " +#~ "the function :code:`startFlwrGRPC`." +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. 
We only" +#~ " have to implement :code:`Client` or " +#~ "call the provided :code:`MLFlwrClient` and " +#~ "call :code:`startFlwrGRPC()`. The attribute " +#~ ":code:`hostname` and :code:`port` tells the" +#~ " client which server to connect to." +#~ " This can be done by entering " +#~ "the hostname and port in the " +#~ "application before clicking the start " +#~ "button to start the federated learning" +#~ " process." +#~ msgstr "" + +#~ msgid "" +#~ "For simple workloads we can start " +#~ "a Flower server and leave all the" +#~ " configuration possibilities at their " +#~ "default values. In a file named " +#~ ":code:`server.py`, import Flower and start " +#~ "the server:" +#~ msgstr "" + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system in your ios device. The " +#~ "full `source code " +#~ "`_ for" +#~ " this example can be found in " +#~ ":code:`examples/ios`." +#~ msgstr "" + +#~ msgid "" +#~ "Before we start building our JAX " +#~ "example, we need install the packages" +#~ " :code:`jax`, :code:`jaxlib`, :code:`scikit-" +#~ "learn`, and :code:`flwr`:" +#~ msgstr "" + +#~ msgid "" +#~ "We begin with a brief description " +#~ "of the centralized training code based" +#~ " on a :code:`Linear Regression` model. " +#~ "If you want a more in-depth " +#~ "explanation of what's going on then " +#~ "have a look at the official `JAX" +#~ " documentation `_." +#~ msgstr "" + +#~ msgid "" +#~ "Let's create a new file called " +#~ ":code:`jax_training.py` with all the " +#~ "components required for a traditional " +#~ "(centralized) linear regression training. " +#~ "First, the JAX packages :code:`jax` and" +#~ " :code:`jaxlib` need to be imported. " +#~ "In addition, we need to import " +#~ ":code:`sklearn` since we use " +#~ ":code:`make_regression` for the dataset and" +#~ " :code:`train_test_split` to split the " +#~ "dataset into a training and test " +#~ "set. 
You can see that we do " +#~ "not yet import the :code:`flwr` package" +#~ " for federated learning. This will be" +#~ " done later." +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`load_data()` function loads the " +#~ "mentioned training and test sets." +#~ msgstr "" + +#~ msgid "" +#~ "The model architecture (a very simple" +#~ " :code:`Linear Regression` model) is " +#~ "defined in :code:`load_model()`." +#~ msgstr "" + +#~ msgid "" +#~ "We now need to define the training" +#~ " (function :code:`train()`), which loops " +#~ "over the training set and measures " +#~ "the loss (function :code:`loss_fn()`) for " +#~ "each batch of training examples. The " +#~ "loss function is separate since JAX " +#~ "takes derivatives with a :code:`grad()` " +#~ "function (defined in the :code:`main()` " +#~ "function and called in :code:`train()`)." +#~ msgstr "" + +#~ msgid "" +#~ "The evaluation of the model is " +#~ "defined in the function :code:`evaluation()`." +#~ " The function takes all test examples" +#~ " and measures the loss of the " +#~ "linear regression model." +#~ msgstr "" + +#~ msgid "" +#~ "Having defined the data loading, model" +#~ " architecture, training, and evaluation we" +#~ " can put everything together and " +#~ "train our model using JAX. As " +#~ "already mentioned, the :code:`jax.grad()` " +#~ "function is defined in :code:`main()` " +#~ "and passed to :code:`train()`." +#~ msgstr "" + +#~ msgid "" +#~ "The concept of federating an existing" +#~ " workload is always the same and " +#~ "easy to understand. We have to " +#~ "start a *server* and then use the" +#~ " code in :code:`jax_training.py` for the" +#~ " *clients* that are connected to the" +#~ " *server*. The *server* sends model " +#~ "parameters to the clients. The *clients*" +#~ " run the training and update the " +#~ "parameters. The updated parameters are " +#~ "sent back to the *server*, which " +#~ "averages all received parameter updates. 
" +#~ "This describes one round of the " +#~ "federated learning process, and we " +#~ "repeat this for multiple rounds." +#~ msgstr "" + +#~ msgid "" +#~ "Finally, we will define our *client* " +#~ "logic in :code:`client.py` and build " +#~ "upon the previously defined JAX training" +#~ " in :code:`jax_training.py`. Our *client* " +#~ "needs to import :code:`flwr`, but also" +#~ " :code:`jax` and :code:`jaxlib` to update" +#~ " the parameters on our JAX model:" +#~ msgstr "" + +#~ msgid "" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " :code:`flwr.client.Client` or " +#~ ":code:`flwr.client.NumPyClient`. Our implementation " +#~ "will be based on " +#~ ":code:`flwr.client.NumPyClient` and we'll call " +#~ "it :code:`FlowerClient`. :code:`NumPyClient` is " +#~ "slightly easier to implement than " +#~ ":code:`Client` if you use a framework" +#~ " with good NumPy interoperability (like " +#~ "JAX) because it avoids some of the" +#~ " boilerplate that would otherwise be " +#~ "necessary. :code:`FlowerClient` needs to " +#~ "implement four methods, two methods for" +#~ " getting/setting model parameters, one " +#~ "method for training the model, and " +#~ "one method for testing the model:" +#~ msgstr "" + +#~ msgid ":code:`set_parameters (optional)`" +#~ msgstr "" + +#~ msgid "transform parameters to NumPy :code:`ndarray`'s" +#~ msgstr "" + +#~ msgid "" +#~ "The challenging part is to transform " +#~ "the JAX model parameters from " +#~ ":code:`DeviceArray` to :code:`NumPy ndarray` " +#~ "to make them compatible with " +#~ "`NumPyClient`." +#~ msgstr "" + +#~ msgid "" +#~ "The two :code:`NumPyClient` methods " +#~ ":code:`fit` and :code:`evaluate` make use " +#~ "of the functions :code:`train()` and " +#~ ":code:`evaluate()` previously defined in " +#~ ":code:`jax_training.py`. 
So what we really " +#~ "do here is we tell Flower through" +#~ " our :code:`NumPyClient` subclass which of" +#~ " our already defined functions to " +#~ "call for training and evaluation. We " +#~ "included type annotations to give you" +#~ " a better understanding of the data" +#~ " types that get passed around." +#~ msgstr "" + +#~ msgid "" +#~ "In this tutorial, we will learn " +#~ "how to train a :code:`Logistic " +#~ "Regression` model on MNIST using Flower" +#~ " and scikit-learn." +#~ msgstr "" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. However, before" +#~ " setting up the client and server," +#~ " we will define all functionalities " +#~ "that we need for our federated " +#~ "learning setup within :code:`utils.py`. The" +#~ " :code:`utils.py` contains different functions" +#~ " defining all the machine learning " +#~ "basics:" +#~ msgstr "" + +#~ msgid ":code:`get_model_parameters()`" +#~ msgstr "" + +#~ msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +#~ msgstr "" + +#~ msgid ":code:`set_model_params()`" +#~ msgstr "" + +#~ msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" +#~ msgstr "" + +#~ msgid ":code:`set_initial_params()`" +#~ msgstr "" + +#~ msgid "" +#~ "Please check out :code:`utils.py` `here " +#~ "`_ for more details. " +#~ "The pre-defined functions are used " +#~ "in the :code:`client.py` and imported. " +#~ "The :code:`client.py` also requires to " +#~ "import several packages such as Flower" +#~ " and scikit-learn:" +#~ msgstr "" + +#~ msgid "" +#~ "Prior to local training, we need " +#~ "to load the MNIST dataset, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning, " +#~ "and partition the dataset for FL. " +#~ "This can be conveniently achieved using" +#~ " `Flower Datasets `_." 
+#~ " The :code:`FederatedDataset.load_partition()` method" +#~ " loads the partitioned training set " +#~ "for each partition ID defined in " +#~ "the :code:`--partition-id` argument." +#~ msgstr "" + +#~ msgid "" +#~ "Next, the logistic regression model is" +#~ " defined and initialized with " +#~ ":code:`utils.set_initial_params()`." +#~ msgstr "" + +#~ msgid "" +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to fit the logistic " +#~ "regression we defined earlier)." +#~ msgstr "" + +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses " +#~ "scikit-learn. Implementing :code:`NumPyClient` " +#~ "usually means defining the following " +#~ "methods (:code:`set_parameters` is optional " +#~ "though):" +#~ msgstr "" + +#~ msgid ":code:`set_parameters` (optional)" +#~ msgstr "" + +#~ msgid "is directly imported with :code:`utils.set_model_params()`" +#~ msgstr "" + +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`MnistClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. 
In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." +#~ msgstr "" + +#~ msgid ":code:`server.py`, import Flower and start the server:" +#~ msgstr "" + +#~ msgid "" +#~ "The number of federated learning rounds" +#~ " is set in :code:`fit_round()` and " +#~ "the evaluation is defined in " +#~ ":code:`get_evaluate_fn()`. The evaluation function" +#~ " is called after each federated " +#~ "learning round and gives you information" +#~ " about loss and accuracy. Note that" +#~ " we also make use of Flower " +#~ "Datasets here to load the test " +#~ "split of the MNIST dataset for " +#~ "server-side evaluation." +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`main` contains the server-" +#~ "side parameter initialization " +#~ ":code:`utils.set_initial_params()` as well as " +#~ "the aggregation strategy " +#~ ":code:`fl.server.strategy:FedAvg()`. The strategy is" +#~ " the default one, federated averaging " +#~ "(or FedAvg), with two clients and " +#~ "evaluation after each federated learning " +#~ "round. The server can be started " +#~ "with the command " +#~ ":code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +#~ "strategy=strategy, " +#~ "config=fl.server.ServerConfig(num_rounds=3))`." +#~ msgstr "" + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/sklearn-logreg-" +#~ "mnist`." +#~ msgstr "" + +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a federated XGBoost model" +#~ " on HIGGS dataset using Flower and" +#~ " :code:`xgboost` package. 
We use a " +#~ "simple example (`full code xgboost-" +#~ "quickstart `_) with two *clients* " +#~ "and one *server* to demonstrate how " +#~ "federated XGBoost works, and then we " +#~ "dive into a more complex example " +#~ "(`full code xgboost-comprehensive " +#~ "`_) to run various experiments." +#~ msgstr "" + +#~ msgid "" +#~ "Since we want to use :code:`xgboost` " +#~ "package to build up XGBoost trees, " +#~ "let's go ahead and install " +#~ ":code:`xgboost`:" +#~ msgstr "" + +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import xgboost, Flower, Flower Datasets " +#~ "and other related functions:" +#~ msgstr "" + +#~ msgid "" +#~ "In this example, we split the " +#~ "dataset into 30 partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=30)`)." +#~ " Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`partition_id`:" +#~ msgstr "" + +#~ msgid "" +#~ "After that, we do train/test splitting" +#~ " on the given partition (client's " +#~ "local data), and transform data format" +#~ " for :code:`xgboost` package." +#~ msgstr "" + +#~ msgid "" +#~ "The functions of :code:`train_test_split` and" +#~ " :code:`transform_dataset_to_dmatrix` are defined " +#~ "as below:" +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`num_local_round` represents the " +#~ "number of iterations for local tree " +#~ "boost. We use CPU for the training" +#~ " in default. One can shift it " +#~ "to GPU by setting :code:`tree_method` to" +#~ " :code:`gpu_hist`. We use AUC as " +#~ "evaluation metric." +#~ msgstr "" + +#~ msgid "" +#~ "After loading the dataset we define " +#~ "the Flower client. We follow the " +#~ "general rule to define :code:`XgbClient` " +#~ "class inherited from :code:`fl.client.Client`." +#~ msgstr "" + +#~ msgid "" +#~ "All required parameters defined above " +#~ "are passed to :code:`XgbClient`'s constructor." 
+#~ msgstr "" + +#~ msgid "" +#~ "Then, we override :code:`get_parameters`, " +#~ ":code:`fit` and :code:`evaluate` methods " +#~ "insides :code:`XgbClient` class as follows." +#~ msgstr "" + +#~ msgid "" +#~ "Unlike neural network training, XGBoost " +#~ "trees are not started from a " +#~ "specified random weights. In this case," +#~ " we do not use :code:`get_parameters` " +#~ "and :code:`set_parameters` to initialise model" +#~ " parameters for XGBoost. As a result," +#~ " let's return an empty tensor in " +#~ ":code:`get_parameters` when it is called " +#~ "by the server at the first round." +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. From " +#~ "the second round, we load the " +#~ "global model sent from server to " +#~ "new build Booster object, and then " +#~ "update model weights on local training" +#~ " data with function :code:`local_boost` as" +#~ " follows:" +#~ msgstr "" + +#~ msgid "" +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`bst_input.update` method. " +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`evaluate`, after loading the " +#~ "global model, we call :code:`bst.eval_set` " +#~ "function to conduct evaluation on valid" +#~ " set. The AUC value will be " +#~ "returned." +#~ msgstr "" + +#~ msgid "" +#~ "Now, we can create an instance of" +#~ " our class :code:`XgbClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` and " +#~ "call :code:`fl.client.start_client()`. The string" +#~ " :code:`\"[::]:8080\"` tells the client " +#~ "which server to connect to. 
In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" + +#~ msgid "" +#~ "In a file named :code:`server.py`, " +#~ "import Flower and FedXgbBagging from " +#~ ":code:`flwr.server.strategy`." +#~ msgstr "" + +#~ msgid "" +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients. The :code:`config_func` function is" +#~ " to return the current FL round " +#~ "number to client's :code:`fit()` and " +#~ ":code:`evaluate()` methods." +#~ msgstr "" + +#~ msgid "" +#~ "In file :code:`flwr.server.strategy.fedxgb_bagging.py`," +#~ " we define :code:`FedXgbBagging` inherited " +#~ "from :code:`flwr.server.strategy.FedAvg`. Then, we" +#~ " override the :code:`aggregate_fit`, " +#~ ":code:`aggregate_evaluate` and :code:`evaluate` " +#~ "methods as follows:" +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`aggregate_fit`, we sequentially " +#~ "aggregate the clients' XGBoost trees by" +#~ " calling :code:`aggregate()` function:" +#~ msgstr "" + +#~ msgid "" +#~ "In this function, we first fetch " +#~ "the number of trees and the number" +#~ " of parallel trees for the current" +#~ " and previous model by calling " +#~ ":code:`_get_tree_nums`. Then, the fetched " +#~ "information will be aggregated. After " +#~ "that, the trees (containing model " +#~ "weights) are aggregated to generate a" +#~ " new tree model." +#~ msgstr "" + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated XGBoost " +#~ "system. The AUC values can be " +#~ "checked in :code:`metrics_distributed`. 
One " +#~ "can see that the average AUC " +#~ "increases over FL rounds." +#~ msgstr "" + +#~ msgid "" +#~ "The full `source code " +#~ "`_ for this example can be" +#~ " found in :code:`examples/xgboost-quickstart`." +#~ msgstr "" + +#~ msgid "" +#~ "To do this, we first customise a" +#~ " :code:`ClientManager` in :code:`server_utils.py`:" +#~ msgstr "" + +#~ msgid "" +#~ "The customised :code:`ClientManager` samples " +#~ "all available clients in each FL " +#~ "round based on the order of " +#~ "connection to the server. Then, we " +#~ "define a new strategy :code:`FedXgbCyclic` " +#~ "in :code:`flwr.server.strategy.fedxgb_cyclic.py`, in " +#~ "order to sequentially select only one" +#~ " client in given round and pass " +#~ "the received model to next client." +#~ msgstr "" + +#~ msgid "" +#~ "Unlike the original :code:`FedAvg`, we " +#~ "don't perform aggregation here. Instead, " +#~ "we just make a copy of the " +#~ "received client model as global model" +#~ " by overriding :code:`aggregate_fit`." +#~ msgstr "" + +#~ msgid "" +#~ "Also, the customised :code:`configure_fit` and" +#~ " :code:`configure_evaluate` methods ensure the" +#~ " clients to be sequentially selected " +#~ "given FL round:" +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`dataset.py`, we have a " +#~ "function :code:`instantiate_partitioner` to " +#~ "instantiate the data partitioner based " +#~ "on the given :code:`num_partitions` and " +#~ ":code:`partitioner_type`. Currently, we provide " +#~ "four supported partitioner type to " +#~ "simulate the uniformity/non-uniformity in " +#~ "data quantity (uniform, linear, square, " +#~ "exponential)." +#~ msgstr "" + +#~ msgid "" +#~ "To facilitate centralised evaluation, we " +#~ "define a function in :code:`server_utils.py`:" +#~ msgstr "" + +#~ msgid "" +#~ "This function returns a evaluation " +#~ "function which instantiates a :code:`Booster`" +#~ " object and loads the global model" +#~ " weights to it. 
The evaluation is " +#~ "conducted by calling :code:`eval_set()` " +#~ "method, and the tested AUC value " +#~ "is reported." +#~ msgstr "" + +#~ msgid "" +#~ "As for distributed evaluation on the " +#~ "clients, it's same as the quick-" +#~ "start example by overriding the " +#~ ":code:`evaluate()` method insides the " +#~ ":code:`XgbClient` class in :code:`client_utils.py`." +#~ msgstr "" + +#~ msgid "" +#~ "We also provide an example code " +#~ "(:code:`sim.py`) to use the simulation " +#~ "capabilities of Flower to simulate " +#~ "federated XGBoost training on either a" +#~ " single machine or a cluster of " +#~ "machines." +#~ msgstr "" + +#~ msgid "" +#~ "After importing all required packages, " +#~ "we define a :code:`main()` function to" +#~ " perform the simulation process:" +#~ msgstr "" + +#~ msgid "" +#~ "We first load the dataset and " +#~ "perform data partitioning, and the " +#~ "pre-processed data is stored in a " +#~ ":code:`list`. After the simulation begins, " +#~ "the clients won't need to pre-" +#~ "process their partitions again." +#~ msgstr "" + +#~ msgid "" +#~ "After that, we start the simulation " +#~ "by calling :code:`fl.simulation.start_simulation`:" +#~ msgstr "" + +#~ msgid "" +#~ "One of key parameters for " +#~ ":code:`start_simulation` is :code:`client_fn` which" +#~ " returns a function to construct a" +#~ " client. We define it as follows:" +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`utils.py`, we define the " +#~ "arguments parsers for clients, server " +#~ "and simulation, allowing users to " +#~ "specify different experimental settings. Let's" +#~ " first see the sever side:" +#~ msgstr "" + +#~ msgid "" +#~ "This allows user to specify training " +#~ "strategies / the number of total " +#~ "clients / FL rounds / participating " +#~ "clients / clients for evaluation, and" +#~ " evaluation fashion. 
Note that with " +#~ ":code:`--centralised-eval`, the sever will " +#~ "do centralised evaluation and all " +#~ "functionalities for client evaluation will " +#~ "be disabled." +#~ msgstr "" + +#~ msgid "" +#~ "This defines various options for client" +#~ " data partitioning. Besides, clients also" +#~ " have an option to conduct evaluation" +#~ " on centralised test set by setting" +#~ " :code:`--centralised-eval`, as well as " +#~ "an option to perform scaled learning " +#~ "rate based on the number of " +#~ "clients by setting :code:`--scaled-lr`." +#~ msgstr "" + +#~ msgid "" +#~ "The full `code " +#~ "`_ for this comprehensive " +#~ "example can be found in :code:`examples" +#~ "/xgboost-comprehensive`." +#~ msgstr "" + +#~ msgid "|b8714c45b74b4d8fb008e2ebb3bc1d44|" +#~ msgstr "" + +#~ msgid "|75f1561efcfd422ea67d28d1513120dc|" +#~ msgstr "" + +#~ msgid "|6a1f51b235304558a9bdaaabfc93b8d2|" +#~ msgstr "" + +#~ msgid "|35e70dab1fb544af9aa3a9c09c4f9797|" +#~ msgstr "" + +#~ msgid "|d7efb5705dd3467f991ed23746824a07|" +#~ msgstr "" + +#~ msgid "|94e7b021c7b540bfbedf7f082a41ff87|" +#~ msgstr "" + +#~ msgid "|a80714782dde439ab73936518f91fc3c|" +#~ msgstr "" + +#~ msgid "|c62080ca6197473da57d191c8225a9d9|" +#~ msgstr "" + +#~ msgid "|21a8f1e6a5b14a7bbb8559979d0e8a2b|" +#~ msgstr "" + +#~ msgid "|c310f2a22f7b4917bf42775aae7a1c09|" +#~ msgstr "" + +#~ msgid "|a0c5b43401194535a8460bcf02e65f9a|" +#~ msgstr "" + +#~ msgid "|aabfdbd5564e41a790f8ea93cc21a444|" +#~ msgstr "" + +#~ msgid "|c9cc8f160fa647b09e742fe4dc8edb54|" +#~ msgstr "" + +#~ msgid "|7e83aad011cd4907b2f02f907c6922e9|" +#~ msgstr "" + +#~ msgid "|4627c2bb6cc443ae9e079f81f33c9dd9|" +#~ msgstr "" + +#~ msgid "|131af8322dc5466b827afd24be98f8c0|" +#~ msgstr "" + +#~ msgid "|f92920b87f3a40179bf7ddd0b6144c53|" +#~ msgstr "" + +#~ msgid "|d62da263071d45a496f543e41fce3a19|" +#~ msgstr "" + +#~ msgid "|ad851971645b4e1fbf8d15bcc0b2ee11|" +#~ msgstr "" + +#~ msgid "|929e9a6de6b34edb8488e644e2bb5221|" +#~ msgstr "" + +#~ 
msgid "|404cf9c9e8d64784a55646c0f9479cbc|" +#~ msgstr "" + +#~ msgid "|b021ff9d25814458b1e631f8985a648b|" +#~ msgstr "" + +#~ msgid "|e6ca84e1df244f238288a768352678e5|" +#~ msgstr "" + +#~ msgid "|39c2422082554a21963baffb33a0d057|" +#~ msgstr "" + +#~ msgid "|07ecf5fcd6814e88906accec6fa0fbfb|" +#~ msgstr "" + +#~ msgid "|57e78c0ca8a94ba5a64a04b1f2280e55|" +#~ msgstr "" + +#~ msgid "|9819b40e59ee40a4921e1244e8c99bac|" +#~ msgstr "" + +#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|" +#~ msgstr "" + diff --git a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po index 9af452fb0be2..ccf319df4629 100644 --- a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po +++ b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-09-24 00:29+0000\n" +"POT-Creation-Date: 2024-09-27 00:30+0000\n" "PO-Revision-Date: 2024-06-12 10:09+0000\n" "Last-Translator: Yan Gao \n" "Language: zh_Hans\n" @@ -62,23 +62,23 @@ msgid "" "or not by reading the Flower source code." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:22 +#: ../../source/contributor-explanation-public-and-private-apis.rst:23 #, fuzzy msgid "Flower public API" msgstr "Flower 客户端。" -#: ../../source/contributor-explanation-public-and-private-apis.rst:24 +#: ../../source/contributor-explanation-public-and-private-apis.rst:25 msgid "Flower has a well-defined public API. Let's look at this in more detail." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:28 +#: ../../source/contributor-explanation-public-and-private-apis.rst:29 msgid "" "Every component that is reachable by recursively following " "``__init__.__all__`` starting from the root package (``flwr``) is part of" " the public API." 
msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:30 +#: ../../source/contributor-explanation-public-and-private-apis.rst:32 msgid "" "If you want to determine whether a component " "(class/function/generator/...) is part of the public API or not, you need" @@ -86,13 +86,13 @@ msgid "" "src/py/flwr`` to look at the Python sub-packages contained ``flwr``:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:43 +#: ../../source/contributor-explanation-public-and-private-apis.rst:46 msgid "" "Contrast this with the definition of ``__all__`` in the root " "``src/py/flwr/__init__.py``:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:55 +#: ../../source/contributor-explanation-public-and-private-apis.rst:59 msgid "" "You can see that ``flwr`` has six subpackages (``cli``, ``client``, " "``common``, ``proto``, ``server``, ``simulation``), but only four of them" @@ -100,7 +100,7 @@ msgid "" "``simulation``)." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:57 +#: ../../source/contributor-explanation-public-and-private-apis.rst:63 msgid "" "What does this mean? It means that ``client``, ``common``, ``server`` and" " ``simulation`` are part of the public API, but ``cli`` and ``proto`` are" @@ -111,21 +111,21 @@ msgid "" "even be removed completely." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:62 +#: ../../source/contributor-explanation-public-and-private-apis.rst:70 msgid "Therefore, as a Flower user:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:64 +#: ../../source/contributor-explanation-public-and-private-apis.rst:72 msgid "``from flwr import client`` ✅ Ok, you're importing a public API." 
msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:65 +#: ../../source/contributor-explanation-public-and-private-apis.rst:73 msgid "" "``from flwr import proto`` ❌ Not recommended, you're importing a private " "API." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:67 +#: ../../source/contributor-explanation-public-and-private-apis.rst:75 msgid "" "What about components that are nested deeper in the hierarchy? Let's look" " at Flower strategies to see another typical pattern. Flower strategies " @@ -134,7 +134,7 @@ msgid "" "``src/py/flwr/server/strategy/__init__.py``:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:81 +#: ../../source/contributor-explanation-public-and-private-apis.rst:91 msgid "" "What's notable here is that all strategies are implemented in dedicated " "modules (e.g., ``fedavg.py``). In ``__init__.py``, we *import* the " @@ -146,33 +146,33 @@ msgid "" "the public API (as long as we update the import path in ``__init__.py``)." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:86 +#: ../../source/contributor-explanation-public-and-private-apis.rst:99 msgid "Therefore:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:88 +#: ../../source/contributor-explanation-public-and-private-apis.rst:101 msgid "" "``from flwr.server.strategy import FedAvg`` ✅ Ok, you're importing a " "class that is part of the public API." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:89 +#: ../../source/contributor-explanation-public-and-private-apis.rst:103 msgid "" "``from flwr.server.strategy import fedavg`` ❌ Not recommended, you're " "importing a private module." 
msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:91 +#: ../../source/contributor-explanation-public-and-private-apis.rst:106 msgid "" "This approach is also implemented in the tooling that automatically " "builds API reference docs." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:94 +#: ../../source/contributor-explanation-public-and-private-apis.rst:110 msgid "Flower public API of private packages" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:96 +#: ../../source/contributor-explanation-public-and-private-apis.rst:112 msgid "" "We also use this to define the public API of private subpackages. Public," " in this context, means the API that other ``flwr`` subpackages should " @@ -180,14 +180,14 @@ msgid "" "not exported via ``src/py/flwr/server/__init__.py``'s ``__all__``)." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:100 +#: ../../source/contributor-explanation-public-and-private-apis.rst:117 msgid "" "Still, the private sub-package ``flwr.server.driver`` defines a " "\"public\" API using ``__all__`` in " "``src/py/flwr/server/driver/__init__.py``:" msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:114 +#: ../../source/contributor-explanation-public-and-private-apis.rst:132 msgid "" "The interesting part is that both ``GrpcDriver`` and ``InMemoryDriver`` " "are never used by Flower framework users, only by other parts of the " @@ -199,7 +199,7 @@ msgid "" "``InMemoryDriver`` class definition)." msgstr "" -#: ../../source/contributor-explanation-public-and-private-apis.rst:117 +#: ../../source/contributor-explanation-public-and-private-apis.rst:140 msgid "" "This is because ``flwr.server.driver`` defines a public interface for " "other ``flwr`` subpackages. This allows codeowners of " @@ -234,17 +234,17 @@ msgid "" "development environment." 
msgstr "在开始之前,我们需要在本地开发环境中满足一些先决条件。" -#: ../../source/contributor-how-to-build-docker-images.rst:12 +#: ../../source/contributor-how-to-build-docker-images.rst:13 #, fuzzy msgid "Clone the ``flower`` repository." msgstr "**叉花仓库**" -#: ../../source/contributor-how-to-build-docker-images.rst:18 +#: ../../source/contributor-how-to-build-docker-images.rst:19 #, fuzzy msgid "Verify the Docker daemon is running." msgstr "验证 Docker 守护进程是否正在运行。" -#: ../../source/contributor-how-to-build-docker-images.rst:20 +#: ../../source/contributor-how-to-build-docker-images.rst:21 #, fuzzy msgid "" "The build instructions that assemble the images are located in the " @@ -252,7 +252,7 @@ msgid "" "``src/docker``." msgstr "组装镜像的构建说明位于各自的 Dockerfile 中。你可以在 ``src/docker`` 的子目录中找到它们。" -#: ../../source/contributor-how-to-build-docker-images.rst:23 +#: ../../source/contributor-how-to-build-docker-images.rst:24 #, fuzzy msgid "" "Flower Docker images are configured via build arguments. Through build " @@ -267,160 +267,160 @@ msgstr "" "``PYTHON_VERSION`` 联编参数指定要安装的 Python " "版本。有些联编参数有默认值,有些则必须在联编映像时指定。每个映像的所有可用联编参数都列在下表中。" -#: ../../source/contributor-how-to-build-docker-images.rst:30 +#: ../../source/contributor-how-to-build-docker-images.rst:32 #, fuzzy msgid "Building the Base Image" msgstr "加载数据" -#: ../../source/contributor-how-to-build-docker-images.rst:36 -#: ../../source/contributor-how-to-build-docker-images.rst:98 +#: ../../source/contributor-how-to-build-docker-images.rst:38 +#: ../../source/contributor-how-to-build-docker-images.rst:104 #, fuzzy msgid "Build argument" msgstr "构建文档" -#: ../../source/contributor-how-to-build-docker-images.rst:37 -#: ../../source/contributor-how-to-build-docker-images.rst:99 +#: ../../source/contributor-how-to-build-docker-images.rst:39 +#: ../../source/contributor-how-to-build-docker-images.rst:105 #, fuzzy msgid "Description" msgstr "停用" -#: ../../source/contributor-how-to-build-docker-images.rst:38 -#: 
../../source/contributor-how-to-build-docker-images.rst:100 +#: ../../source/contributor-how-to-build-docker-images.rst:40 +#: ../../source/contributor-how-to-build-docker-images.rst:106 #, fuzzy msgid "Required" msgstr "所需变更" -#: ../../source/contributor-how-to-build-docker-images.rst:39 -#: ../../source/contributor-how-to-build-docker-images.rst:101 -#: ../../source/docker/persist-superlink-state.rst:18 -#: ../../source/docker/pin-version.rst:11 +#: ../../source/contributor-how-to-build-docker-images.rst:41 +#: ../../source/contributor-how-to-build-docker-images.rst:107 +#: ../../source/docker/persist-superlink-state.rst:19 +#: ../../source/docker/pin-version.rst:12 #: ../../source/docker/set-environment-variables.rst:8 #, fuzzy msgid "Example" msgstr "实例" -#: ../../source/contributor-how-to-build-docker-images.rst:40 +#: ../../source/contributor-how-to-build-docker-images.rst:42 msgid "``DISTRO``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:41 +#: ../../source/contributor-how-to-build-docker-images.rst:43 #, fuzzy msgid "The Linux distribution to use as the base image." 
msgstr "基础镜像的存储库名称。" -#: ../../source/contributor-how-to-build-docker-images.rst:42 -#: ../../source/contributor-how-to-build-docker-images.rst:46 -#: ../../source/contributor-how-to-build-docker-images.rst:50 -#: ../../source/contributor-how-to-build-docker-images.rst:66 -#: ../../source/contributor-how-to-build-docker-images.rst:70 -#: ../../source/contributor-how-to-build-docker-images.rst:104 +#: ../../source/contributor-how-to-build-docker-images.rst:44 +#: ../../source/contributor-how-to-build-docker-images.rst:48 +#: ../../source/contributor-how-to-build-docker-images.rst:52 +#: ../../source/contributor-how-to-build-docker-images.rst:68 +#: ../../source/contributor-how-to-build-docker-images.rst:75 +#: ../../source/contributor-how-to-build-docker-images.rst:110 #, fuzzy msgid "No" msgstr "现在" -#: ../../source/contributor-how-to-build-docker-images.rst:43 +#: ../../source/contributor-how-to-build-docker-images.rst:45 #, fuzzy msgid "``ubuntu``" msgstr "``UBUNTU_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:44 +#: ../../source/contributor-how-to-build-docker-images.rst:46 #, fuzzy msgid "``DISTRO_VERSION``" msgstr "``PIP_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:45 +#: ../../source/contributor-how-to-build-docker-images.rst:47 msgid "Version of the Linux distribution." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:47 +#: ../../source/contributor-how-to-build-docker-images.rst:49 msgid ":substitution-code:`|ubuntu_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:48 +#: ../../source/contributor-how-to-build-docker-images.rst:50 #, fuzzy msgid "``PYTHON_VERSION``" msgstr "Python 版本" -#: ../../source/contributor-how-to-build-docker-images.rst:49 +#: ../../source/contributor-how-to-build-docker-images.rst:51 #, fuzzy msgid "Version of ``python`` to be installed." 
msgstr "要安装的 ``python`` 版本。" -#: ../../source/contributor-how-to-build-docker-images.rst:51 +#: ../../source/contributor-how-to-build-docker-images.rst:53 msgid "``3.11`` or ``3.11.1``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:52 +#: ../../source/contributor-how-to-build-docker-images.rst:54 #, fuzzy msgid "``PIP_VERSION``" msgstr "``PIP_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:53 +#: ../../source/contributor-how-to-build-docker-images.rst:55 #, fuzzy msgid "Version of ``pip`` to be installed." msgstr "要安装的 ``pip` 版本。" -#: ../../source/contributor-how-to-build-docker-images.rst:54 -#: ../../source/contributor-how-to-build-docker-images.rst:58 -#: ../../source/contributor-how-to-build-docker-images.rst:62 -#: ../../source/contributor-how-to-build-docker-images.rst:108 +#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:60 +#: ../../source/contributor-how-to-build-docker-images.rst:64 +#: ../../source/contributor-how-to-build-docker-images.rst:114 #, fuzzy msgid "Yes" msgstr "类型" -#: ../../source/contributor-how-to-build-docker-images.rst:55 +#: ../../source/contributor-how-to-build-docker-images.rst:57 msgid ":substitution-code:`|pip_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:58 #, fuzzy msgid "``SETUPTOOLS_VERSION``" msgstr "设置工具版本" -#: ../../source/contributor-how-to-build-docker-images.rst:57 +#: ../../source/contributor-how-to-build-docker-images.rst:59 #, fuzzy msgid "Version of ``setuptools`` to be installed." 
msgstr "要安装的 `setuptools`` 版本。" -#: ../../source/contributor-how-to-build-docker-images.rst:59 +#: ../../source/contributor-how-to-build-docker-images.rst:61 #, fuzzy msgid ":substitution-code:`|setuptools_version|`" msgstr "设置工具版本" -#: ../../source/contributor-how-to-build-docker-images.rst:60 +#: ../../source/contributor-how-to-build-docker-images.rst:62 #, fuzzy msgid "``FLWR_VERSION``" msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:61 +#: ../../source/contributor-how-to-build-docker-images.rst:63 #, fuzzy msgid "Version of Flower to be installed." msgstr "要安装的 Flower 版本。" -#: ../../source/contributor-how-to-build-docker-images.rst:63 +#: ../../source/contributor-how-to-build-docker-images.rst:65 msgid ":substitution-code:`|stable_flwr_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:64 +#: ../../source/contributor-how-to-build-docker-images.rst:66 #, fuzzy msgid "``FLWR_PACKAGE``" msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:65 +#: ../../source/contributor-how-to-build-docker-images.rst:67 #, fuzzy msgid "The Flower package to be installed." 
msgstr "要安装的 PyPI 软件包。" -#: ../../source/contributor-how-to-build-docker-images.rst:67 +#: ../../source/contributor-how-to-build-docker-images.rst:69 msgid "``flwr`` or ``flwr-nightly``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:68 +#: ../../source/contributor-how-to-build-docker-images.rst:70 #, fuzzy msgid "``FLWR_VERSION_REF``" msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:69 +#: ../../source/contributor-how-to-build-docker-images.rst:71 msgid "" "A `direct reference " "`_上创建一个免费的Weblate帐户。有关个人资料设置的更多信息,请参阅`这里" " `_。" -#: ../../source/contributor-how-to-contribute-translations.rst:29 +#: ../../source/contributor-how-to-contribute-translations.rst:28 msgid "" "Once you are signed in to Weblate, you can navigate to the `Flower " "Framework project `_。在这里,您可以看到网站上现有的各种语言。" -#: ../../source/contributor-how-to-contribute-translations.rst:34 +#: ../../source/contributor-how-to-contribute-translations.rst:32 msgid "" "Once you have selected the language you want to contribute to, you should" " see a similar interface to this:" msgstr "选择您要贡献的语言后,您应该会看到与此类似的界面:" -#: ../../source/contributor-how-to-contribute-translations.rst:39 +#: ../../source/contributor-how-to-contribute-translations.rst:37 msgid "" "The most straight forward option here is to click on the ``Translate`` " "button on the top right (in the ``Translation status`` section). This " @@ -582,11 +582,11 @@ msgid "" "untranslated strings." 
msgstr "最简单的方法是点击右上角(\"翻译状态 \"部分)的 \"翻译 \"按钮。这将自动带您进入未翻译字符串的翻译界面。" -#: ../../source/contributor-how-to-contribute-translations.rst:43 +#: ../../source/contributor-how-to-contribute-translations.rst:41 msgid "This is what the interface looks like:" msgstr "这就是界面的样子:" -#: ../../source/contributor-how-to-contribute-translations.rst:47 +#: ../../source/contributor-how-to-contribute-translations.rst:45 #, fuzzy msgid "" "You input your translation in the text box at the top and then, once you " @@ -600,7 +600,7 @@ msgstr "" "\"保存并继续\"(保存翻译内容并转到下一个未翻译的字符串)、\"保存并停留\"(保存翻译内容并停留在同一页面)、\"建议\"(将您的翻译添加到建议中供其他用户查看)或" " \"跳过\"(转到下一个未翻译的字符串而不保存任何内容)。" -#: ../../source/contributor-how-to-contribute-translations.rst:54 +#: ../../source/contributor-how-to-contribute-translations.rst:51 msgid "" "In order to help with the translations, you can see on the bottom the " "``Nearby strings``, the ``Comments`` (from other contributors), the " @@ -611,14 +611,14 @@ msgstr "" "为了帮助翻译,您可以在底部看到 \"邻近字符串\"、\"评论\"(来自其他贡献者)、\"自动建议\"(来自机器翻译引擎)、\"其他语言 " "\"中的翻译以及该字符串的 \"历史翻译\"。" -#: ../../source/contributor-how-to-contribute-translations.rst:59 +#: ../../source/contributor-how-to-contribute-translations.rst:56 msgid "" "On the right, under the ``String information`` section, you can also " "click the link under ``Source string location`` in order to view the " "source of the doc file containing the string." 
msgstr "在右侧的 \"字符串信息 \"部分,您还可以单击 \"源字符串位置 \"下的链接,以查看包含字符串的 doc 文件的源文件。" -#: ../../source/contributor-how-to-contribute-translations.rst:63 +#: ../../source/contributor-how-to-contribute-translations.rst:60 msgid "" "For more information about translating using Weblate, you can check out " "this `in-depth guide " @@ -627,11 +627,11 @@ msgstr "" "有关使用 Weblate 进行翻译的更多信息,您可以查看本 \"深入指南 " "`_\"。" -#: ../../source/contributor-how-to-contribute-translations.rst:67 +#: ../../source/contributor-how-to-contribute-translations.rst:64 msgid "Add new languages" msgstr "添加新语言" -#: ../../source/contributor-how-to-contribute-translations.rst:69 +#: ../../source/contributor-how-to-contribute-translations.rst:66 msgid "" "If you want to add a new language, you will first have to contact us, " "either on `Slack `_, or by opening an issue" @@ -654,24 +654,24 @@ msgstr "" "在开发 Flower 框架时,我们希望确保所有贡献者使用相同的开发环境来格式化代码或运行测试。为此,我们使用了 VSCode " "远程容器扩展。这是什么?请阅读下面这段话:" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:7 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:8 +#, fuzzy msgid "" "The Visual Studio Code Remote - Containers extension lets you use a " "Docker container as a fully-featured development environment. It allows " "you to open any folder inside (or mounted into) a container and take " "advantage of Visual Studio Code's full feature set. A " -":code:`devcontainer.json` file in your project tells VS Code how to " -"access (or create) a development container with a well-defined tool and " -"runtime stack. This container can be used to run an application or to " -"separate tools, libraries, or runtimes needed for working with a " -"codebase." +"``devcontainer.json`` file in your project tells VS Code how to access " +"(or create) a development container with a well-defined tool and runtime " +"stack. This container can be used to run an application or to separate " +"tools, libraries, or runtimes needed for working with a codebase." 
msgstr "" "Visual Studio Code Remote - " "Containers扩展可让你将Docker容器用作功能齐全的开发环境。它允许你打开容器内(或挂载到容器内)的任何文件夹,并利用 Visual " "Studio Code 的全部功能集。项目中的 :code:`devcontainer.json` 文件会告诉 VS Code " "如何访问(或创建)一个带有定义明确的工具和运行时栈的开发容器。该容器可用于运行应用程序,也可用于分离处理代码库所需的工具、库或运行时。" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:9 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:16 msgid "" "Workspace files are mounted from the local file system or copied or " "cloned into the container. Extensions are installed and run inside the " @@ -680,33 +680,33 @@ msgid "" " environment just by connecting to a different container." msgstr "工作区文件从本地文件系统加载,或复制或克隆到容器中。扩展在容器内安装和运行,在容器内它们可以完全访问工具、平台和文件系统。这意味着,只需连接到不同的容器,就能无缝切换整个开发环境。" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:11 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:22 #, fuzzy msgid "" "Source: `Official VSCode documentation " "`_" msgstr "来源:`VSCode 官方文档 `_" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:15 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:26 msgid "Getting started" msgstr "开始" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:17 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:28 #, fuzzy msgid "" -"Configuring and setting up the :code:`Dockerfile` as well the " -"configuration for the devcontainer can be a bit more involved. The good " -"thing is you don't have to do it. Usually it should be enough to install " -"`Docker `_ on your system and " -"ensure its available on your command line. Additionally, install the " -"`VSCode Containers Extension `_ on your system and ensure its" +" available on your command line. Additionally, install the `VSCode " +"Containers Extension `_." 
msgstr "" "配置和设置 :code:`Dockerfile` 以及 devcontainer 的配置可能比较复杂。好在你想做就得做。通常只需在系统中安装 " "Docker 并确保其在命令行中可用即可。此外,请安装 `VSCode Containers Extension " "`_。" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:19 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:35 msgid "" "Now you should be good to go. When starting VSCode, it will ask you to " "run in the container environment and - if you confirm - automatically " @@ -718,13 +718,13 @@ msgstr "" "现在你应该可以开始了。启动 VSCode 时,它会要求你在容器环境中运行,如果你确认,它会自动构建容器并使用它。要手动指示 VSCode 使用 " "devcontainer,可以在安装扩展后,点击 VSCode 窗口左下角的绿色区域,然后选择 \"*(重新)在容器中打开文件夹*\"选项。" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:21 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:41 msgid "" "In some cases your setup might be more involved. For those cases consult " "the following sources:" msgstr "在某些情况下,您的设置可能更复杂。有关这些情况,请参考以下资料:" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:23 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:44 #, fuzzy msgid "" "`Developing inside a Container " @@ -734,7 +734,7 @@ msgstr "" "在容器内开发 `_" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:24 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:46 #, fuzzy msgid "" "`Remote development in Containers " @@ -763,13 +763,13 @@ msgstr "" "依赖关系,然后重新安装(运行 ``poetry install` 前,别忘了删除 ``poetry.lock` (``rm " "poetry.lock`))。" -#: ../../source/contributor-how-to-install-development-versions.rst:12 +#: ../../source/contributor-how-to-install-development-versions.rst:14 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (without " "extras)" msgstr "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (不含额外内容)" -#: ../../source/contributor-how-to-install-development-versions.rst:13 +#: ../../source/contributor-how-to-install-development-versions.rst:15 msgid "" "``flwr = { 
version = \"1.0.0a0\", allow-prereleases = true, extras = " "[\"simulation\"] }`` (with extras)" @@ -777,17 +777,17 @@ msgstr "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true, extras = " "[\"simulation\"] }`` (包含额外内容)" -#: ../../source/contributor-how-to-install-development-versions.rst:15 +#: ../../source/contributor-how-to-install-development-versions.rst:18 msgid "" "Install ``flwr`` from a local copy of the Flower source code via " "``pyproject.toml``:" msgstr "通过 ``pyproject.toml`` 从 Flower 源代码的本地副本安装 ``flwr``:" -#: ../../source/contributor-how-to-install-development-versions.rst:17 +#: ../../source/contributor-how-to-install-development-versions.rst:20 msgid "``flwr = { path = \"../../\", develop = true }`` (without extras)" msgstr "``flwr = { path = \"../../\", develop = true }`` (不含额外内容)" -#: ../../source/contributor-how-to-install-development-versions.rst:18 +#: ../../source/contributor-how-to-install-development-versions.rst:21 msgid "" "``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " "}`` (with extras)" @@ -795,18 +795,18 @@ msgstr "" "``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " "}`` (包含额外内容)" -#: ../../source/contributor-how-to-install-development-versions.rst:20 +#: ../../source/contributor-how-to-install-development-versions.rst:23 msgid "Install ``flwr`` from a local wheel file via ``pyproject.toml``:" msgstr "通过 ``pyproject.toml`` 从本地轮子文件安装 ``flwr``:" -#: ../../source/contributor-how-to-install-development-versions.rst:22 +#: ../../source/contributor-how-to-install-development-versions.rst:25 #, fuzzy msgid "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (without" " extras)" msgstr "``flwr = { path = \"../../dist/flwr-1.0.0-py3-none-any.whl\" }``(无额外内容)" -#: ../../source/contributor-how-to-install-development-versions.rst:23 +#: ../../source/contributor-how-to-install-development-versions.rst:26 #, fuzzy msgid "" "``flwr = { path = 
\"../../dist/flwr-1.8.0-py3-none-any.whl\", extras = " @@ -815,7 +815,7 @@ msgstr "" "``flwr = { path = \"../../dist/flwr-1.0.0-py3-none-any.whl\", extras = " "[\"simulation\"] }`` (包含额外内容)" -#: ../../source/contributor-how-to-install-development-versions.rst:25 +#: ../../source/contributor-how-to-install-development-versions.rst:29 msgid "" "Please refer to the Poetry documentation for further details: `Poetry " "Dependency Specification `_" -#: ../../source/contributor-how-to-install-development-versions.rst:28 +#: ../../source/contributor-how-to-install-development-versions.rst:33 msgid "Using pip (recommended on Colab)" msgstr "使用 pip(建议在 Colab 上使用)" -#: ../../source/contributor-how-to-install-development-versions.rst:30 +#: ../../source/contributor-how-to-install-development-versions.rst:35 msgid "Install a ``flwr`` pre-release from PyPI:" msgstr "从 PyPI 安装 ``flwr`` 预发行版:" -#: ../../source/contributor-how-to-install-development-versions.rst:32 +#: ../../source/contributor-how-to-install-development-versions.rst:37 msgid "``pip install -U --pre flwr`` (without extras)" msgstr "``pip install -U -pre flwr``(不含额外功能)" -#: ../../source/contributor-how-to-install-development-versions.rst:33 +#: ../../source/contributor-how-to-install-development-versions.rst:38 msgid "``pip install -U --pre 'flwr[simulation]'`` (with extras)" msgstr "``pip install -U -pre 'flwr[simulation]'``(包含额外功能)" -#: ../../source/contributor-how-to-install-development-versions.rst:35 +#: ../../source/contributor-how-to-install-development-versions.rst:40 msgid "" "Python packages can be installed from git repositories. Use one of the " "following commands to install the Flower directly from GitHub." 
msgstr "Python 软件包可以从 git 仓库安装。使用以下命令之一直接从 GitHub 安装 Flower。" -#: ../../source/contributor-how-to-install-development-versions.rst:37 +#: ../../source/contributor-how-to-install-development-versions.rst:43 msgid "Install ``flwr`` from the default GitHub branch (``main``):" msgstr "从 GitHub 的默认分支 (``main`) 安装 ``flwr``:" -#: ../../source/contributor-how-to-install-development-versions.rst:39 +#: ../../source/contributor-how-to-install-development-versions.rst:45 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git`` (without " "extras)" msgstr "``pip install flwr@git+https://github.com/adap/flower.git`` (不含额外功能)" -#: ../../source/contributor-how-to-install-development-versions.rst:40 +#: ../../source/contributor-how-to-install-development-versions.rst:46 msgid "" "``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" " (with extras)" @@ -864,11 +864,11 @@ msgstr "" "``pip install " "'flwr[simulation]@git+https://github.com/adap/flower.git'``(带附加功能)" -#: ../../source/contributor-how-to-install-development-versions.rst:42 +#: ../../source/contributor-how-to-install-development-versions.rst:49 msgid "Install ``flwr`` from a specific GitHub branch (``branch-name``):" msgstr "从特定的 GitHub 分支 (`分支名`) 安装 ``flwr``:" -#: ../../source/contributor-how-to-install-development-versions.rst:44 +#: ../../source/contributor-how-to-install-development-versions.rst:51 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " "(without extras)" @@ -876,7 +876,7 @@ msgstr "" "``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " "(不含附加功能)" -#: ../../source/contributor-how-to-install-development-versions.rst:45 +#: ../../source/contributor-how-to-install-development-versions.rst:53 msgid "" "``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git" "@branch-name'`` (with extras)" @@ -884,18 +884,18 @@ msgstr "" "``pip install " 
"'flwr[simulation]@git+https://github.com/adap/flower.git@分支名'``(带附加功能)" -#: ../../source/contributor-how-to-install-development-versions.rst:49 +#: ../../source/contributor-how-to-install-development-versions.rst:57 msgid "Open Jupyter Notebooks on Google Colab" msgstr "在谷歌 Colab 上打开 Jupyter 笔记本" -#: ../../source/contributor-how-to-install-development-versions.rst:51 +#: ../../source/contributor-how-to-install-development-versions.rst:59 #, fuzzy msgid "" "Open the notebook ``doc/source/tutorial-series-get-started-with-flower-" "pytorch.ipynb``:" msgstr "打开笔记本 ``doc/source/tutorial-get-started-with-flower-pytorch.ipynb``:" -#: ../../source/contributor-how-to-install-development-versions.rst:53 +#: ../../source/contributor-how-to-install-development-versions.rst:61 #, fuzzy msgid "" "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" @@ -904,7 +904,7 @@ msgstr "" "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" "/tutorial-get-started-with-flower-pytorch.ipynb" -#: ../../source/contributor-how-to-install-development-versions.rst:55 +#: ../../source/contributor-how-to-install-development-versions.rst:63 msgid "" "Open a development version of the same notebook from branch `branch-name`" " by changing ``main`` to ``branch-name`` (right after ``blob``):" @@ -912,7 +912,7 @@ msgstr "" "将 ``main`` 改为 ``branch-name``(紧跟在 ``blob``之后),从分支 `branch-name` " "打开同一笔记本的开发版本:" -#: ../../source/contributor-how-to-install-development-versions.rst:57 +#: ../../source/contributor-how-to-install-development-versions.rst:66 #, fuzzy msgid "" "https://colab.research.google.com/github/adap/flower/blob/branch-" @@ -921,22 +921,22 @@ msgstr "" "https://colab.research.google.com/github/adap/flower/blob/branch-" "name/doc/source/tutorial-get-started-with-flower-pytorch.ipynb" -#: ../../source/contributor-how-to-install-development-versions.rst:59 +#: ../../source/contributor-how-to-install-development-versions.rst:68 msgid "Install a `whl` on 
Google Colab:" msgstr "在 Google Colab 上安装 `whl`:" -#: ../../source/contributor-how-to-install-development-versions.rst:61 +#: ../../source/contributor-how-to-install-development-versions.rst:70 msgid "" "In the vertical icon grid on the left hand side, select ``Files`` > " "``Upload to session storage``" msgstr "在左侧的垂直图标网格中,选择 \"文件\">\"上传到会话存储\"" -#: ../../source/contributor-how-to-install-development-versions.rst:62 +#: ../../source/contributor-how-to-install-development-versions.rst:72 #, fuzzy msgid "Upload the whl (e.g., ``flwr-1.8.0-py3-none-any.whl``)" msgstr "更新 whl (e.g., ``flwr-1.7.0-py3-none-any.whl``)" -#: ../../source/contributor-how-to-install-development-versions.rst:63 +#: ../../source/contributor-how-to-install-development-versions.rst:73 #, fuzzy msgid "" "Change ``!pip install -q 'flwr[simulation]' torch torchvision " @@ -957,18 +957,18 @@ msgid "" "change in the future." msgstr "本文件描述了当前的发布流程。今后可能会有变化,也可能不会有变化。" -#: ../../source/contributor-how-to-release-flower.rst:7 +#: ../../source/contributor-how-to-release-flower.rst:8 msgid "During the release" msgstr "在发布期间" -#: ../../source/contributor-how-to-release-flower.rst:9 +#: ../../source/contributor-how-to-release-flower.rst:10 msgid "" "The version number of a release is stated in ``pyproject.toml``. 
To " "release a new version of Flower, the following things need to happen (in " "that order):" msgstr "版本号在 ``pyproject.toml`` 中说明。要发布 Flower 的新版本,需要完成以下工作(按顺序排列):" -#: ../../source/contributor-how-to-release-flower.rst:11 +#: ../../source/contributor-how-to-release-flower.rst:13 #, fuzzy msgid "" "Run ``python3 src/py/flwr_tool/update_changelog.py `` in " @@ -978,7 +978,7 @@ msgstr "" "运行 ``python3 src/py/flwr_tool/update_changelog.py `` " "以将每项新更改添加到更新日志中(之后可对更新日志进行手动更改,直到看起来不错为止)。" -#: ../../source/contributor-how-to-release-flower.rst:12 +#: ../../source/contributor-how-to-release-flower.rst:16 #, fuzzy msgid "" "Once the changelog has been updated with all the changes, run ``./dev" @@ -992,7 +992,7 @@ msgstr "" "v``,其中````是``pyproject.toml``中的版本(注意前面的``v``)。这将用版本和当前日期替换更新日志中的" " ``Unreleased`` 标头,并为贡献者添加一条感谢信息。打开一个包含这些更改的拉取请求。" -#: ../../source/contributor-how-to-release-flower.rst:13 +#: ../../source/contributor-how-to-release-flower.rst:22 #, fuzzy msgid "" "Once the pull request is merged, tag the release commit with the version " @@ -1004,93 +1004,93 @@ msgstr "" "在 PR 合并后立即用版本号标记发布提交:``git tag v0.12.3``,然后``git push --tags``。这将在 GitHub" " 上创建一个包含正确工件和更新日志相关部分的发布草案。" -#: ../../source/contributor-how-to-release-flower.rst:14 +#: ../../source/contributor-how-to-release-flower.rst:26 msgid "Check the draft release on GitHub, and if everything is good, publish it." msgstr "检查 GitHub 上的发布稿,如果一切正常,就发布它。" -#: ../../source/contributor-how-to-release-flower.rst:17 +#: ../../source/contributor-how-to-release-flower.rst:29 msgid "After the release" msgstr "发布后" -#: ../../source/contributor-how-to-release-flower.rst:19 +#: ../../source/contributor-how-to-release-flower.rst:31 msgid "Create a pull request which contains the following changes:" msgstr "创建包含以下更改的拉取请求:" -#: ../../source/contributor-how-to-release-flower.rst:21 +#: ../../source/contributor-how-to-release-flower.rst:33 msgid "Increase the minor version in ``pyproject.toml`` by one." 
msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" -#: ../../source/contributor-how-to-release-flower.rst:22 +#: ../../source/contributor-how-to-release-flower.rst:34 msgid "Update all files which contain the current version number if necessary." msgstr "如有必要,更新包含当前版本号的所有文件。" -#: ../../source/contributor-how-to-release-flower.rst:23 +#: ../../source/contributor-how-to-release-flower.rst:35 msgid "Add a new ``Unreleased`` section in ``changelog.md``." msgstr "在 ``changelog.md`` 中添加新的 ``Unreleased`` 部分。" -#: ../../source/contributor-how-to-release-flower.rst:25 +#: ../../source/contributor-how-to-release-flower.rst:37 msgid "" "Merge the pull request on the same day (i.e., before a new nightly " "release gets published to PyPI)." msgstr "在同一天合并拉取请求(即在新版本发布到 PyPI 之前)。" -#: ../../source/contributor-how-to-release-flower.rst:28 +#: ../../source/contributor-how-to-release-flower.rst:41 msgid "Publishing a pre-release" msgstr "发布预发布版本" -#: ../../source/contributor-how-to-release-flower.rst:31 +#: ../../source/contributor-how-to-release-flower.rst:44 msgid "Pre-release naming" msgstr "释放前命名" -#: ../../source/contributor-how-to-release-flower.rst:33 +#: ../../source/contributor-how-to-release-flower.rst:46 msgid "" "PyPI supports pre-releases (alpha, beta, release candidate). 
Pre-releases" " MUST use one of the following naming patterns:" msgstr "PyPI 支持预发布版本(alpha、beta、release candidate)。预发布版本必须使用以下命名模式之一:" -#: ../../source/contributor-how-to-release-flower.rst:35 +#: ../../source/contributor-how-to-release-flower.rst:49 msgid "Alpha: ``MAJOR.MINOR.PATCHaN``" msgstr "阿尔法 ``MAJOR.MINOR.PATCHaN``" -#: ../../source/contributor-how-to-release-flower.rst:36 +#: ../../source/contributor-how-to-release-flower.rst:50 msgid "Beta: ``MAJOR.MINOR.PATCHbN``" msgstr "贝塔: ``MAJOR.MINOR.PATCHbN``" -#: ../../source/contributor-how-to-release-flower.rst:37 +#: ../../source/contributor-how-to-release-flower.rst:51 msgid "Release candidate (RC): ``MAJOR.MINOR.PATCHrcN``" msgstr "版本代号 (RC): ``MAJOR.MINOR.PATCHrcN``" -#: ../../source/contributor-how-to-release-flower.rst:39 +#: ../../source/contributor-how-to-release-flower.rst:53 msgid "Examples include:" msgstr "例子包括:" -#: ../../source/contributor-how-to-release-flower.rst:41 +#: ../../source/contributor-how-to-release-flower.rst:55 msgid "``1.0.0a0``" msgstr "``1.0.0a0``" -#: ../../source/contributor-how-to-release-flower.rst:42 +#: ../../source/contributor-how-to-release-flower.rst:56 msgid "``1.0.0b0``" msgstr "``1.0.0b0``" -#: ../../source/contributor-how-to-release-flower.rst:43 +#: ../../source/contributor-how-to-release-flower.rst:57 msgid "``1.0.0rc0``" msgstr "``1.0.0rc0``" -#: ../../source/contributor-how-to-release-flower.rst:44 +#: ../../source/contributor-how-to-release-flower.rst:58 msgid "``1.0.0rc1``" msgstr "``1.0.0rc1``" -#: ../../source/contributor-how-to-release-flower.rst:46 +#: ../../source/contributor-how-to-release-flower.rst:60 msgid "" "This is in line with PEP-440 and the recommendations from the Python " "Packaging Authority (PyPA):" msgstr "这符合 PEP-440 和 Python 包装管理局 (PyPA) 的建议:" -#: ../../source/contributor-how-to-release-flower.rst:49 +#: ../../source/contributor-how-to-release-flower.rst:63 msgid "`PEP-440 `_" msgstr "`PEP-440 `_" -#: 
../../source/contributor-how-to-release-flower.rst:50 +#: ../../source/contributor-how-to-release-flower.rst:64 msgid "" "`PyPA Choosing a versioning scheme " "`_" -#: ../../source/contributor-how-to-release-flower.rst:52 +#: ../../source/contributor-how-to-release-flower.rst:67 msgid "" "Note that the approach defined by PyPA is not compatible with SemVer " "2.0.0 spec, for details consult the `Semantic Versioning Specification " @@ -1110,26 +1110,26 @@ msgstr "" "规范不兼容,详情请查阅《语义版本规范》`_(特别是关于优先级的第 11 项)。" -#: ../../source/contributor-how-to-release-flower.rst:55 +#: ../../source/contributor-how-to-release-flower.rst:73 msgid "Pre-release classification" msgstr "发布前分类" -#: ../../source/contributor-how-to-release-flower.rst:57 +#: ../../source/contributor-how-to-release-flower.rst:75 msgid "Should the next pre-release be called alpha, beta, or release candidate?" msgstr "下一个预发布版应该叫阿尔法版、贝塔版还是候选发布版?" -#: ../../source/contributor-how-to-release-flower.rst:59 +#: ../../source/contributor-how-to-release-flower.rst:77 msgid "" "RC: feature complete, no known issues (apart from issues that are " "classified as \"won't fix\" for the next stable release) - if no issues " "surface this will become the next stable release" msgstr "RC:功能完整,无已知问题(除了下一个稳定版中被列为 \"不会修复 \"的问题)--如果没有问题出现,这将成为下一个稳定版" -#: ../../source/contributor-how-to-release-flower.rst:60 +#: ../../source/contributor-how-to-release-flower.rst:80 msgid "Beta: feature complete, allowed to have known issues" msgstr "贝塔版:功能完整,允许存在已知问题" -#: ../../source/contributor-how-to-release-flower.rst:61 +#: ../../source/contributor-how-to-release-flower.rst:81 msgid "Alpha: not feature complete, allowed to have known issues" msgstr "阿尔法版:功能不完整,允许存在已知问题" @@ -1147,12 +1147,12 @@ msgstr "" "建议在虚拟环境中运行 Python 设置。本指南展示了如何使用 pyenv virtualenv、poes 或 Anaconda " "创建虚拟环境的三个不同示例。您可以按照说明或选择您喜欢的设置。" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:9 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:10 msgid "Python 
Version" msgstr "Python 版本" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:11 -#: ../../source/how-to-install-flower.rst:8 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:12 +#: ../../source/how-to-install-flower.rst:7 msgid "" "Flower requires at least `Python 3.9 `_, " "but `Python 3.10 `_ or above is " @@ -1161,7 +1161,7 @@ msgstr "" "Flower 至少需要 `Python 3.9 `_,但建议使用 `Python " "3.10 `_或更高版本。" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:14 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:17 #, fuzzy msgid "" "Due to a known incompatibility with `ray " @@ -1172,12 +1172,12 @@ msgstr "" "由于已知与 `ray `_ 不兼容,我们目前建议最多使用 `Python 3.11" " `_ 运行 Flower 仿真。" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:19 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:22 #, fuzzy msgid "Virtualenv with Pyenv/Virtualenv" msgstr "Virutualenv 和 Pyenv/Virtualenv" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:21 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:24 msgid "" "One of the recommended virtual environment is `pyenv " "`_/`virtualenv `_。详情请参见 `Flower 示例 " "`_。" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:23 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 msgid "" "Once Pyenv is set up, you can use it to install `Python Version 3.10 " "`_ or above:" msgstr "一旦设置好 Pyenv,就可以用它来安装 `Python 3.10 `_ 或更高版本:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 msgid "Create the virtualenv with:" msgstr "创建虚拟环境:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:42 msgid "Activate the virtualenv by running the following command:" msgstr "运行以下命令激活 virtualenv:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:44 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:49 msgid "Virtualenv 
with Poetry" msgstr "有诗意的 Virtualenv" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:46 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:51 msgid "" "The Flower examples are based on `Poetry `_ to manage dependencies. After installing Poetry you " @@ -1215,36 +1215,37 @@ msgstr "" "Flower 示例基于 `Poetry `_ 来管理依赖关系。安装 Poetry" " 后,只需创建一个虚拟环境即可:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:52 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:58 msgid "" "If you open a new terminal you can activate the previously created " "virtual environment with the following command:" msgstr "如果打开一个新终端,可以使用以下命令激活之前创建的虚拟环境:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:60 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:66 msgid "Virtualenv with Anaconda" msgstr "使用 Anaconda 的 Virtualenv" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:62 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 +#, fuzzy msgid "" "If you prefer to use Anaconda for your virtual environment then install " "and setup the `conda `_ package. After setting it up you can " +"/user-guide/install/index.html>`_ package. After setting it up you can " "create a virtual environment with:" msgstr "" "如果你更喜欢在虚拟环境中使用 Anaconda,那么请安装并设置 `conda " "`_ 软件包。设置完成后,您就可以使用以下工具创建虚拟环境:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 msgid "and activate the virtual environment with:" msgstr "并激活虚拟环境:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:83 msgid "And then?" msgstr "然后呢?" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:78 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:85 msgid "" "As soon as you created your virtual environment you clone one of the " "`Flower examples `_." 
@@ -1256,11 +1257,11 @@ msgstr "" msgid "Write documentation" msgstr "编写文件" -#: ../../source/contributor-how-to-write-documentation.rst:6 +#: ../../source/contributor-how-to-write-documentation.rst:5 msgid "Project layout" msgstr "项目布局" -#: ../../source/contributor-how-to-write-documentation.rst:8 +#: ../../source/contributor-how-to-write-documentation.rst:7 msgid "" "The Flower documentation lives in the ``doc`` directory. The Sphinx-based" " documentation system supports both reStructuredText (``.rst`` files) and" @@ -1270,7 +1271,7 @@ msgstr "" "Markdown(``.md`` 文件)。" #: ../../source/contributor-how-to-write-documentation.rst:10 -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:169 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:193 #, fuzzy msgid "" "Note that, in order to build the documentation locally (with ``poetry run" @@ -1281,20 +1282,20 @@ msgstr "" "请注意,要在本地构建文档(使用 ``poetry run make html``,如下所述),系统上必须安装 ``Pandoc " "_`。" -#: ../../source/contributor-how-to-write-documentation.rst:14 +#: ../../source/contributor-how-to-write-documentation.rst:15 msgid "Edit an existing page" msgstr "编辑现有页面" -#: ../../source/contributor-how-to-write-documentation.rst:16 +#: ../../source/contributor-how-to-write-documentation.rst:17 msgid "Edit an existing ``.rst`` (or ``.md``) file under ``doc/source/``" msgstr "编辑 ``doc/source/`` 下现有的 ``.rst`` (或 ``.md``) 文件" -#: ../../source/contributor-how-to-write-documentation.rst:17 +#: ../../source/contributor-how-to-write-documentation.rst:18 #: ../../source/contributor-how-to-write-documentation.rst:27 msgid "Compile the docs: ``cd doc``, then ``poetry run make html``" msgstr "编译文档: cd doc``,然后 ``poetry run make html``" -#: ../../source/contributor-how-to-write-documentation.rst:18 +#: ../../source/contributor-how-to-write-documentation.rst:19 #: ../../source/contributor-how-to-write-documentation.rst:28 msgid "Open ``doc/build/html/index.html`` in the browser to check the result" 
msgstr "在浏览器中打开 ``doc/build/html/index.html`` 查看结果" @@ -1329,34 +1330,34 @@ msgstr "" "我们欢迎为Flower做出代码贡献!然而,要知道从哪里开始并非易事。因此,我们提出了一些建议,告诉您从哪里开始,以增加您的 PR 被 Flower" " 代码库接受的机会。" -#: ../../source/contributor-ref-good-first-contributions.rst:11 +#: ../../source/contributor-ref-good-first-contributions.rst:9 msgid "Where to start" msgstr "从哪里开始" -#: ../../source/contributor-ref-good-first-contributions.rst:13 +#: ../../source/contributor-ref-good-first-contributions.rst:11 msgid "" "Until the Flower core library matures it will be easier to get PR's " "accepted if they only touch non-core areas of the codebase. Good " "candidates to get started are:" msgstr "在 Flower 核心库成熟之前,如果 PR 只涉及代码库中的非核心区域,则会更容易被接受。可以从以下方面入手:" -#: ../../source/contributor-ref-good-first-contributions.rst:17 +#: ../../source/contributor-ref-good-first-contributions.rst:14 msgid "Documentation: What's missing? What could be expressed more clearly?" msgstr "文档: 缺少什么?哪些内容可以表达得更清楚?" -#: ../../source/contributor-ref-good-first-contributions.rst:18 +#: ../../source/contributor-ref-good-first-contributions.rst:15 msgid "Baselines: See below." msgstr "Baselines: 见下文。" -#: ../../source/contributor-ref-good-first-contributions.rst:19 +#: ../../source/contributor-ref-good-first-contributions.rst:16 msgid "Examples: See below." 
msgstr "示例: 见下文。" -#: ../../source/contributor-ref-good-first-contributions.rst:23 +#: ../../source/contributor-ref-good-first-contributions.rst:19 msgid "Request for Flower Baselines" msgstr "Flower Baselines的申请" -#: ../../source/contributor-ref-good-first-contributions.rst:25 +#: ../../source/contributor-ref-good-first-contributions.rst:21 #, fuzzy msgid "" "If you are not familiar with Flower Baselines, you should probably check-" @@ -1366,7 +1367,7 @@ msgstr "" "如果您对 Flower Baselines 还不熟悉,也许可以看看我们的 `Baselines贡献指南 " "`_。" -#: ../../source/contributor-ref-good-first-contributions.rst:27 +#: ../../source/contributor-ref-good-first-contributions.rst:25 #, fuzzy msgid "" "You should then check out the open `issues " @@ -1379,7 +1380,7 @@ msgstr "" "`_" " baseline请求。如果您发现了自己想做的baseline,而它还没有被分配,请随时把它分配给自己,然后开始工作!" -#: ../../source/contributor-ref-good-first-contributions.rst:31 +#: ../../source/contributor-ref-good-first-contributions.rst:30 msgid "" "Otherwise, if you don't find a baseline you'd like to work on, be sure to" " open a new issue with the baseline request template!" 
@@ -1422,12 +1423,13 @@ msgstr "" "包括 SecAgg、SecAgg+ 和 LightSecAgg 协议。LightSecAgg " "协议尚未实施,因此其图表和抽象在实践中可能并不准确。SecAgg 协议可视为 SecAgg+ 协议的特例。" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:8 -msgid "The :code:`SecAgg+` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:9 +#, fuzzy +msgid "The ``SecAgg+`` abstraction" msgstr "代码:`SecAgg+` 抽象" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:10 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:11 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:163 msgid "" "In this implementation, each client will be assigned with a unique index " "(int) for secure aggregation, and thus many python dictionaries used have" @@ -1436,18 +1438,19 @@ msgstr "" "在此实现中,将为每个客户端分配一个唯一索引(int),以确保聚合的安全性,因此使用的许多 python 字典的键都是 int 类型,而不是 " "ClientProxy 类型。" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:65 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:198 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:67 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:204 msgid "" "The Flower server will execute and process received results in the " "following order:" msgstr "Flower 服务器将按以下顺序执行和处理收到的结果:" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:159 -msgid "The :code:`LightSecAgg` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +#, fuzzy +msgid "The ``LightSecAgg`` abstraction" msgstr "代码:`LightSecAgg` 抽象" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:271 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:277 msgid "Types" msgstr "类型" @@ -1461,7 +1464,7 @@ msgid "" "are not used to contributing to GitHub projects." 
msgstr "本指南适用于想参与 Flower,但不习惯为 GitHub 项目贡献的人。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:6 +#: ../../source/contributor-tutorial-contribute-on-github.rst:7 #, fuzzy msgid "" "If you're familiar with how contributing on GitHub works, you can " @@ -1472,15 +1475,15 @@ msgstr "" "/getting-started-for-contributors.html>`_ 和 \"优秀的首次贡献示例\" " "`_。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:10 +#: ../../source/contributor-tutorial-contribute-on-github.rst:12 msgid "Setting up the repository" msgstr "建立资源库" -#: ../../source/contributor-tutorial-contribute-on-github.rst:21 +#: ../../source/contributor-tutorial-contribute-on-github.rst:29 msgid "**Create a GitHub account and setup Git**" msgstr "**创建 GitHub 账户并设置 Git**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:13 +#: ../../source/contributor-tutorial-contribute-on-github.rst:15 #, fuzzy msgid "" "Git is a distributed version control tool. This allows for an entire " @@ -1492,20 +1495,20 @@ msgstr "" "Git 是一种分布式版本控制工具。它可以将整个代码库的历史记录保存在每个开发人员的机器上。您需要在本地计算机上安装该软件,可以按照本指南 " "`_ 进行设置。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:16 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "" "GitHub, itself, is a code hosting platform for version control and " "collaboration. It allows for everyone to collaborate and work from " "anywhere on remote repositories." msgstr "GitHub 本身是一个用于版本控制和协作的代码托管平台。它允许每个人在任何地方对远程仓库进行协作和工作。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:18 +#: ../../source/contributor-tutorial-contribute-on-github.rst:25 msgid "" "If you haven't already, you will need to create an account on `GitHub " "`_." 
msgstr "如果还没有,您需要在 `GitHub `_ 上创建一个账户。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:20 +#: ../../source/contributor-tutorial-contribute-on-github.rst:28 msgid "" "The idea behind the generic Git and GitHub workflow boils down to this: " "you download code from a remote repository on GitHub, make changes " @@ -1515,22 +1518,22 @@ msgstr "" "通用的 Git 和 GitHub 工作流程背后的理念可以归结为:从 GitHub 上的远程仓库下载代码,在本地进行修改并使用 Git " "进行跟踪,然后将新的历史记录上传回 GitHub。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:32 +#: ../../source/contributor-tutorial-contribute-on-github.rst:42 msgid "**Forking the Flower repository**" msgstr "**叉花仓库**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:24 +#: ../../source/contributor-tutorial-contribute-on-github.rst:32 #, fuzzy msgid "" "A fork is a personal copy of a GitHub repository. To create one for " -"Flower, you must navigate to ``_ (while " +"Flower, you must navigate to https://github.com/adap/flower (while " "connected to your GitHub account) and click the ``Fork`` button situated " "on the top right of the page." 
msgstr "" "fork 是 GitHub 仓库的个人副本。要为 Flower 创建一个 fork,您必须导航到 " "https://github.com/adap/flower(同时连接到您的 GitHub 账户),然后点击页面右上方的 ``Fork`` 按钮。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:29 +#: ../../source/contributor-tutorial-contribute-on-github.rst:38 msgid "" "You can change the name if you want, but this is not necessary as this " "version of Flower will be yours and will sit inside your own account " @@ -1540,11 +1543,11 @@ msgstr "" "您可以更改名称,但没有必要,因为这个版本的 Flower " "将是您自己的,并位于您自己的账户中(即,在您自己的版本库列表中)。创建完成后,您会在左上角看到自己的 Flower 版本。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 msgid "**Cloning your forked repository**" msgstr "**克隆你的分叉仓库**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:35 +#: ../../source/contributor-tutorial-contribute-on-github.rst:45 msgid "" "The next step is to download the forked repository on your machine to be " "able to make changes to it. On your forked repository page, you should " @@ -1554,28 +1557,28 @@ msgstr "" "下一步是在你的机器上下载分叉版本库,以便对其进行修改。在分叉版本库页面上,首先点击右侧的 \"代码 \"按钮,这样就能复制版本库的 HTTPS " "链接。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:41 +#: ../../source/contributor-tutorial-contribute-on-github.rst:52 msgid "" "Once you copied the \\, you can open a terminal on your machine, " "navigate to the place you want to download the repository to and type:" msgstr "一旦复制了 (),你就可以在你的机器上打开一个终端,导航到你想下载软件源的地方,然后键入:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 #, fuzzy msgid "" "This will create a ``flower/`` (or the name of your fork if you renamed " "it) folder in the current working directory." 
msgstr "这将在当前工作目录下创建一个 `flower/`(如果重命名了,则使用 fork 的名称)文件夹。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:66 +#: ../../source/contributor-tutorial-contribute-on-github.rst:78 msgid "**Add origin**" msgstr "**添加原产地**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:50 +#: ../../source/contributor-tutorial-contribute-on-github.rst:62 msgid "You can then go into the repository folder:" msgstr "然后,您就可以进入存储库文件夹:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:56 +#: ../../source/contributor-tutorial-contribute-on-github.rst:68 msgid "" "And here we will need to add an origin to our repository. The origin is " "the \\ of the remote fork repository. To obtain it, we can do as " @@ -1585,28 +1588,28 @@ msgstr "" "在这里,我们需要为我们的版本库添加一个 origin。origin 是远程 fork 仓库的 " "\\。要获得它,我们可以像前面提到的那样,访问 GitHub 账户上的分叉仓库并复制链接。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:61 +#: ../../source/contributor-tutorial-contribute-on-github.rst:75 msgid "" "Once the \\ is copied, we can type the following command in our " "terminal:" msgstr "一旦复制了 \\ ,我们就可以在终端中键入以下命令:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:90 +#: ../../source/contributor-tutorial-contribute-on-github.rst:102 msgid "**Add upstream**" msgstr "**增加上游**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:69 +#: ../../source/contributor-tutorial-contribute-on-github.rst:81 #, fuzzy msgid "" "Now we will add an upstream address to our repository. 
Still in the same " "directory, we must run the following command:" msgstr "现在,我们要为版本库添加一个上游地址。还是在同一目录下,我们必须运行以下命令:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:76 +#: ../../source/contributor-tutorial-contribute-on-github.rst:88 msgid "The following diagram visually explains what we did in the previous steps:" msgstr "下图直观地解释了我们在前面步骤中的操作:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:80 +#: ../../source/contributor-tutorial-contribute-on-github.rst:92 msgid "" "The upstream is the GitHub remote address of the parent repository (in " "this case Flower), i.e. the one we eventually want to contribute to and " @@ -1617,17 +1620,17 @@ msgstr "" "上游是父版本库(这里是 Flower)的 GitHub 远程地址,即我们最终要贡献的版本库,因此需要最新的历史记录。origin " "只是我们创建的分叉仓库的 GitHub 远程地址,即我们自己账户中的副本(分叉)。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:84 +#: ../../source/contributor-tutorial-contribute-on-github.rst:97 msgid "" "To make sure our local version of the fork is up-to-date with the latest " "changes from the Flower repository, we can execute the following command:" msgstr "为了确保本地版本的分叉程序与 Flower 代码库的最新更改保持一致,我们可以执行以下命令:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:93 +#: ../../source/contributor-tutorial-contribute-on-github.rst:105 msgid "Setting up the coding environment" msgstr "设置编码环境" -#: ../../source/contributor-tutorial-contribute-on-github.rst:95 +#: ../../source/contributor-tutorial-contribute-on-github.rst:107 #, fuzzy msgid "" "This can be achieved by following this :doc:`getting started guide for " @@ -1636,155 +1639,158 @@ msgid "" "code and test it, you can finally start making changes!" msgstr "您可以按照这份 \"贡献者入门指南\"__(注意,您不需要克隆版本库)来实现这一点。一旦您能够编写代码并进行测试,您就可以开始修改了!" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:100 +#: ../../source/contributor-tutorial-contribute-on-github.rst:113 msgid "Making changes" msgstr "做出改变" -#: ../../source/contributor-tutorial-contribute-on-github.rst:102 +#: ../../source/contributor-tutorial-contribute-on-github.rst:115 msgid "" "Before making any changes make sure you are up-to-date with your " "repository:" msgstr "在进行任何更改之前,请确保您的版本库是最新的:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:108 +#: ../../source/contributor-tutorial-contribute-on-github.rst:121 msgid "And with Flower's repository:" msgstr "还有Flower的存储库:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:122 +#: ../../source/contributor-tutorial-contribute-on-github.rst:134 msgid "**Create a new branch**" msgstr "**创建一个新分支**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:115 +#: ../../source/contributor-tutorial-contribute-on-github.rst:128 msgid "" "To make the history cleaner and easier to work with, it is good practice " "to create a new branch for each feature/project that needs to be " "implemented." msgstr "为了使历史记录更简洁、更易于操作,为每个需要实现的功能/项目创建一个新分支是个不错的做法。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:118 +#: ../../source/contributor-tutorial-contribute-on-github.rst:131 msgid "" "To do so, just run the following command inside the repository's " "directory:" msgstr "为此,只需在版本库目录下运行以下命令即可:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:136 msgid "**Make changes**" msgstr "**进行修改**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:137 msgid "Write great code and create wonderful changes using your favorite editor!" msgstr "使用您最喜欢的编辑器编写优秀的代码并创建精彩的更改!" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:138 +#: ../../source/contributor-tutorial-contribute-on-github.rst:149 msgid "**Test and format your code**" msgstr "**测试并格式化您的代码**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:128 +#: ../../source/contributor-tutorial-contribute-on-github.rst:139 msgid "" "Don't forget to test and format your code! Otherwise your code won't be " "able to be merged into the Flower repository. This is done so the " "codebase stays consistent and easy to understand." msgstr "不要忘记测试和格式化您的代码!否则您的代码将无法并入 Flower 代码库。这样做是为了使代码库保持一致并易于理解。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:131 +#: ../../source/contributor-tutorial-contribute-on-github.rst:143 msgid "To do so, we have written a few scripts that you can execute:" msgstr "为此,我们编写了一些脚本供您执行:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:150 +#: ../../source/contributor-tutorial-contribute-on-github.rst:162 msgid "**Stage changes**" msgstr "**舞台变化**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:141 +#: ../../source/contributor-tutorial-contribute-on-github.rst:152 msgid "" "Before creating a commit that will update your history, you must specify " "to Git which files it needs to take into account." msgstr "在创建更新历史记录的提交之前,必须向 Git 说明需要考虑哪些文件。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:143 +#: ../../source/contributor-tutorial-contribute-on-github.rst:155 msgid "This can be done with:" msgstr "这可以通过:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:149 +#: ../../source/contributor-tutorial-contribute-on-github.rst:161 +#, fuzzy msgid "" "To check which files have been modified compared to the last version " "(last commit) and to see which files are staged for commit, you can use " -"the :code:`git status` command." +"the ``git status`` command." 
msgstr "要查看与上一版本(上次提交)相比哪些文件已被修改,以及哪些文件处于提交阶段,可以使用 :code:`git status` 命令。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:160 +#: ../../source/contributor-tutorial-contribute-on-github.rst:173 msgid "**Commit changes**" msgstr "**提交更改**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:153 +#: ../../source/contributor-tutorial-contribute-on-github.rst:165 +#, fuzzy msgid "" -"Once you have added all the files you wanted to commit using :code:`git " -"add`, you can finally create your commit using this command:" +"Once you have added all the files you wanted to commit using ``git add``," +" you can finally create your commit using this command:" msgstr "使用 :code:`git add` 添加完所有要提交的文件后,就可以使用此命令创建提交了:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:159 +#: ../../source/contributor-tutorial-contribute-on-github.rst:172 +#, fuzzy msgid "" "The \\ is there to explain to others what the commit " "does. It should be written in an imperative style and be concise. An " -"example would be :code:`git commit -m \"Add images to README\"`." +"example would be ``git commit -m \"Add images to README\"``." 
msgstr "" " 用于向他人解释提交的作用。它应该以命令式风格书写,并且简明扼要。例如 :code:`git commit " "-m \"Add images to README\"`。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:171 +#: ../../source/contributor-tutorial-contribute-on-github.rst:185 msgid "**Push the changes to the fork**" msgstr "**将更改推送到分叉**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:163 +#: ../../source/contributor-tutorial-contribute-on-github.rst:176 msgid "" "Once we have committed our changes, we have effectively updated our local" " history, but GitHub has no way of knowing this unless we push our " "changes to our origin's remote address:" msgstr "一旦提交了修改,我们就有效地更新了本地历史记录,但除非我们将修改推送到原点的远程地址,否则 GitHub 无法得知:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:170 +#: ../../source/contributor-tutorial-contribute-on-github.rst:184 msgid "" "Once this is done, you will see on the GitHub that your forked repo was " "updated with the changes you have made." msgstr "完成此操作后,您将在 GitHub 上看到您的分叉仓库已根据您所做的更改进行了更新。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:174 +#: ../../source/contributor-tutorial-contribute-on-github.rst:188 msgid "Creating and merging a pull request (PR)" msgstr "创建和合并拉取请求 (PR)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +#: ../../source/contributor-tutorial-contribute-on-github.rst:226 msgid "**Create the PR**" msgstr "**创建 PR**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:177 +#: ../../source/contributor-tutorial-contribute-on-github.rst:191 msgid "" "Once you have pushed changes, on the GitHub webpage of your repository " "you should see the following message:" msgstr "推送更改后,在仓库的 GitHub 网页上应该会看到以下信息:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:181 +#: ../../source/contributor-tutorial-contribute-on-github.rst:196 #, fuzzy msgid "Otherwise you can always find this option in the ``Branches`` page." 
msgstr "否则,您可以在 \"分支 \"页面找到该选项。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:183 +#: ../../source/contributor-tutorial-contribute-on-github.rst:198 #, fuzzy msgid "" "Once you click the ``Compare & pull request`` button, you should see " "something similar to this:" msgstr "点击 \"比较和拉取请求 \"按钮后,您应该会看到类似下面的内容:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:187 +#: ../../source/contributor-tutorial-contribute-on-github.rst:203 msgid "At the top you have an explanation of which branch will be merged where:" msgstr "在顶部,你可以看到关于哪个分支将被合并的说明:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:191 +#: ../../source/contributor-tutorial-contribute-on-github.rst:207 msgid "" "In this example you can see that the request is to merge the branch " "``doc-fixes`` from my forked repository to branch ``main`` from the " "Flower repository." msgstr "在这个例子中,你可以看到请求将我分叉的版本库中的分支 ``doc-fixes`` 合并到 Flower 版本库中的分支 ``main``。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:193 +#: ../../source/contributor-tutorial-contribute-on-github.rst:210 #, fuzzy msgid "" "The title should be changed to adhere to the :ref:`pr_title_format` " @@ -1794,7 +1800,7 @@ msgstr "" "应该修改标题以符合 :ref:`pr_title_format` 准则,否则将无法合并 PR。因此,在这种情况下,正确的标题可能是 " "``docs(framework:skip)修复错字``。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:196 +#: ../../source/contributor-tutorial-contribute-on-github.rst:214 msgid "" "The input box in the middle is there for you to describe what your PR " "does and to link it to existing issues. We have placed comments (that " @@ -1802,167 +1808,167 @@ msgid "" "process." msgstr "中间的输入框供您描述 PR 的作用,并将其与现有问题联系起来。我们在此放置了注释(一旦 PR 打开,注释将不会显示),以指导您完成整个过程。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:199 +#: ../../source/contributor-tutorial-contribute-on-github.rst:218 #, fuzzy msgid "It is important to follow the instructions described in comments." 
msgstr "请务必遵守注释中的说明。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:201 +#: ../../source/contributor-tutorial-contribute-on-github.rst:220 msgid "" "At the bottom you will find the button to open the PR. This will notify " "reviewers that a new PR has been opened and that they should look over it" " to merge or to request changes." msgstr "在底部,您可以找到打开 PR 的按钮。这将通知审核人员新的 PR 已经打开,他们应该查看该 PR 以进行合并或要求修改。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:204 +#: ../../source/contributor-tutorial-contribute-on-github.rst:224 msgid "" "If your PR is not yet ready for review, and you don't want to notify " "anyone, you have the option to create a draft pull request:" msgstr "如果您的 PR 尚未准备好接受审核,而且您不想通知任何人,您可以选择创建一个草案拉取请求:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:230 msgid "**Making new changes**" msgstr "**作出新的改变**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:229 msgid "" "Once the PR has been opened (as draft or not), you can still push new " "commits to it the same way we did before, by making changes to the branch" " associated with the PR." 
msgstr "一旦 PR 被打开(无论是否作为草案),你仍然可以像以前一样,通过修改与 PR 关联的分支来推送新的提交。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:231 +#: ../../source/contributor-tutorial-contribute-on-github.rst:253 msgid "**Review the PR**" msgstr "**审查 PR**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:212 +#: ../../source/contributor-tutorial-contribute-on-github.rst:233 msgid "" "Once the PR has been opened or once the draft PR has been marked as " "ready, a review from code owners will be automatically requested:" msgstr "一旦 PR 被打开或 PR 草案被标记为就绪,就会自动要求代码所有者进行审核:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:216 +#: ../../source/contributor-tutorial-contribute-on-github.rst:238 msgid "" "Code owners will then look into the code, ask questions, request changes " "or validate the PR." msgstr "然后,代码所有者会查看代码、提出问题、要求修改或验证 PR。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:218 +#: ../../source/contributor-tutorial-contribute-on-github.rst:241 msgid "Merging will be blocked if there are ongoing requested changes." msgstr "如果有正在进行的更改请求,合并将被阻止。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:222 +#: ../../source/contributor-tutorial-contribute-on-github.rst:245 msgid "" "To resolve them, just push the necessary changes to the branch associated" " with the PR:" msgstr "要解决这些问题,只需将必要的更改推送到与 PR 关联的分支即可:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:226 +#: ../../source/contributor-tutorial-contribute-on-github.rst:250 msgid "And resolve the conversation:" msgstr "并解决对话:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:230 +#: ../../source/contributor-tutorial-contribute-on-github.rst:254 msgid "" "Once all the conversations have been resolved, you can re-request a " "review." 
msgstr "一旦所有对话都得到解决,您就可以重新申请审核。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:251 +#: ../../source/contributor-tutorial-contribute-on-github.rst:274 msgid "**Once the PR is merged**" msgstr "**一旦 PR 被合并**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:234 +#: ../../source/contributor-tutorial-contribute-on-github.rst:256 msgid "" "If all the automatic tests have passed and reviewers have no more changes" " to request, they can approve the PR and merge it." msgstr "如果所有自动测试都已通过,且审核员不再需要修改,他们就可以批准 PR 并将其合并。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:238 +#: ../../source/contributor-tutorial-contribute-on-github.rst:261 msgid "" "Once it is merged, you can delete the branch on GitHub (a button should " "appear to do so) and also delete it locally by doing:" msgstr "合并后,您可以在 GitHub 上删除该分支(会出现一个删除按钮),也可以在本地删除该分支:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:245 +#: ../../source/contributor-tutorial-contribute-on-github.rst:269 msgid "Then you should update your forked repository by doing:" msgstr "然后,你应该更新你的分叉仓库:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:254 +#: ../../source/contributor-tutorial-contribute-on-github.rst:277 msgid "Example of first contribution" msgstr "首次捐款实例" -#: ../../source/contributor-tutorial-contribute-on-github.rst:257 +#: ../../source/contributor-tutorial-contribute-on-github.rst:280 msgid "Problem" msgstr "问题" -#: ../../source/contributor-tutorial-contribute-on-github.rst:259 +#: ../../source/contributor-tutorial-contribute-on-github.rst:282 #, fuzzy msgid "" "For our documentation, we've started to use the `Diàtaxis framework " "`_." 
msgstr "对于我们的文档,我们已经开始使用 \"Diàtaxis 框架 `_\"。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:261 +#: ../../source/contributor-tutorial-contribute-on-github.rst:285 #, fuzzy msgid "" "Our \"How to\" guides should have titles that continue the sentence \"How" " to …\", for example, \"How to upgrade to Flower 1.0\"." msgstr "我们的 \"如何 \"指南的标题应延续 \"如何...... \"的句式,例如 \"如何升级到 Flower 1.0\"。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:263 +#: ../../source/contributor-tutorial-contribute-on-github.rst:288 msgid "" "Most of our guides do not follow this new format yet, and changing their " "title is (unfortunately) more involved than one might think." msgstr "我们的大多数指南还没有采用这种新格式,而更改其标题(不幸的是)比人们想象的要复杂得多。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:265 +#: ../../source/contributor-tutorial-contribute-on-github.rst:291 #, fuzzy msgid "" "This issue is about changing the title of a doc from present continuous " "to present simple." msgstr "这个问题是关于将文档标题从现在进行时改为现在进行时。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:267 +#: ../../source/contributor-tutorial-contribute-on-github.rst:294 #, fuzzy msgid "" "Let's take the example of \"Saving Progress\" which we changed to \"Save " "Progress\". Does this pass our check?" msgstr "以 \"保存进度 \"为例,我们将其改为 \"保存进度\"。这是否通过了我们的检查?" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:269 +#: ../../source/contributor-tutorial-contribute-on-github.rst:297 #, fuzzy msgid "Before: \"How to saving progress\" ❌" msgstr "之前: \"如何保存进度\" ❌" -#: ../../source/contributor-tutorial-contribute-on-github.rst:271 +#: ../../source/contributor-tutorial-contribute-on-github.rst:299 #, fuzzy msgid "After: \"How to save progress\" ✅" msgstr "之后: \"如何保存进度\"✅" -#: ../../source/contributor-tutorial-contribute-on-github.rst:274 +#: ../../source/contributor-tutorial-contribute-on-github.rst:302 msgid "Solution" msgstr "解决方案" -#: ../../source/contributor-tutorial-contribute-on-github.rst:276 +#: ../../source/contributor-tutorial-contribute-on-github.rst:304 #, fuzzy msgid "" "This is a tiny change, but it'll allow us to test your end-to-end setup. " "After cloning and setting up the Flower repo, here's what you should do:" msgstr "这只是一个很小的改动,但可以让我们测试你的端到端设置。克隆并设置好 Flower repo 后,你应该这样做:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:278 +#: ../../source/contributor-tutorial-contribute-on-github.rst:307 #, fuzzy msgid "Find the source file in ``doc/source``" msgstr "在 `doc/source` 中查找源文件" -#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#: ../../source/contributor-tutorial-contribute-on-github.rst:308 #, fuzzy msgid "" "Make the change in the ``.rst`` file (beware, the dashes under the title " "should be the same length as the title itself)" msgstr "在 `.rst` 文件中进行修改(注意,标题下的破折号应与标题本身的长度相同)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:280 +#: ../../source/contributor-tutorial-contribute-on-github.rst:310 #, fuzzy msgid "" "Build the docs and `check the result `_" -#: ../../source/contributor-tutorial-contribute-on-github.rst:283 +#: ../../source/contributor-tutorial-contribute-on-github.rst:314 msgid "Rename file" msgstr "重命名文件" -#: ../../source/contributor-tutorial-contribute-on-github.rst:285 +#: 
../../source/contributor-tutorial-contribute-on-github.rst:316 msgid "" "You might have noticed that the file name still reflects the old wording." " If we just change the file, then we break all existing links to it - it " @@ -1985,33 +1991,33 @@ msgstr "" "您可能已经注意到,文件名仍然反映了旧的措辞。如果我们只是更改文件,那么就会破坏与该文件的所有现有链接--" "避免这种情况是***重要的,破坏链接会损害我们的搜索引擎排名。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:288 +#: ../../source/contributor-tutorial-contribute-on-github.rst:320 #, fuzzy msgid "Here's how to change the file name:" msgstr "下面是更改文件名的方法:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +#: ../../source/contributor-tutorial-contribute-on-github.rst:322 #, fuzzy msgid "Change the file name to ``save-progress.rst``" msgstr "将文件名改为`save-progress.rst`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:291 +#: ../../source/contributor-tutorial-contribute-on-github.rst:323 #, fuzzy msgid "Add a redirect rule to ``doc/source/conf.py``" msgstr "在 `doc/source/conf.py` 中添加重定向规则" -#: ../../source/contributor-tutorial-contribute-on-github.rst:293 +#: ../../source/contributor-tutorial-contribute-on-github.rst:325 #, fuzzy msgid "" "This will cause a redirect from ``saving-progress.html`` to ``save-" "progress.html``, old links will continue to work." msgstr "这将导致从 `saving-progress.html` 重定向到 `save-progress.html`,旧链接将继续工作。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:296 +#: ../../source/contributor-tutorial-contribute-on-github.rst:329 msgid "Apply changes in the index file" msgstr "应用索引文件中的更改" -#: ../../source/contributor-tutorial-contribute-on-github.rst:298 +#: ../../source/contributor-tutorial-contribute-on-github.rst:331 #, fuzzy msgid "" "For the lateral navigation bar to work properly, it is very important to " @@ -2019,42 +2025,42 @@ msgid "" "arborescence of the navbar." 
msgstr "要使横向导航栏正常工作,更新 `index.rst` 文件也非常重要。我们就是在这里定义整个导航栏的结构。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:301 +#: ../../source/contributor-tutorial-contribute-on-github.rst:335 #, fuzzy msgid "Find and modify the file name in ``index.rst``" msgstr "查找并修改 `index.rst` 中的文件名" -#: ../../source/contributor-tutorial-contribute-on-github.rst:304 +#: ../../source/contributor-tutorial-contribute-on-github.rst:338 msgid "Open PR" msgstr "开放式 PR" -#: ../../source/contributor-tutorial-contribute-on-github.rst:306 +#: ../../source/contributor-tutorial-contribute-on-github.rst:340 #, fuzzy msgid "" "Commit the changes (commit messages are always imperative: \"Do " "something\", in this case \"Change …\")" msgstr "提交更改(提交信息总是命令式的:\"做某事\",这里是 \"更改......\")" -#: ../../source/contributor-tutorial-contribute-on-github.rst:307 +#: ../../source/contributor-tutorial-contribute-on-github.rst:342 msgid "Push the changes to your fork" msgstr "将更改推送到分叉" -#: ../../source/contributor-tutorial-contribute-on-github.rst:308 +#: ../../source/contributor-tutorial-contribute-on-github.rst:343 #, fuzzy msgid "" "Open a PR (as shown above) with title ``docs(framework) Update how-to " "guide title``" msgstr "打开一个 PR(如上图所示),标题为\"`docs(framework) Update how-to guide title```\"。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:309 +#: ../../source/contributor-tutorial-contribute-on-github.rst:344 msgid "Wait for it to be approved!" msgstr "等待审批!" -#: ../../source/contributor-tutorial-contribute-on-github.rst:310 +#: ../../source/contributor-tutorial-contribute-on-github.rst:345 msgid "Congrats! 🥳 You're now officially a Flower contributor!" msgstr "祝贺你 🥳 您现在正式成为 \"Flower \"贡献者!" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:314 +#: ../../source/contributor-tutorial-contribute-on-github.rst:348 #: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:573 #: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1012 #: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:811 @@ -2063,45 +2069,45 @@ msgstr "祝贺你 🥳 您现在正式成为 \"Flower \"贡献者!" msgid "Next steps" msgstr "接下来的步骤" -#: ../../source/contributor-tutorial-contribute-on-github.rst:316 +#: ../../source/contributor-tutorial-contribute-on-github.rst:350 msgid "" "Once you have made your first PR, and want to contribute more, be sure to" " check out the following :" msgstr "一旦您完成了第一份 PR,并希望做出更多贡献,请务必查看以下内容:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:318 +#: ../../source/contributor-tutorial-contribute-on-github.rst:353 #, fuzzy msgid "" ":doc:`Good first contributions `, where you should particularly look into the " -":code:`baselines` contributions." +"``baselines`` contributions." 
msgstr "" "`优秀的首次贡献 `_,在这里你应该特别看看 :code:`baselines` 的贡献。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:322 +#: ../../source/contributor-tutorial-contribute-on-github.rst:357 #: ../../source/fed/0000-20200102-fed-template.md:60 msgid "Appendix" msgstr "附录" -#: ../../source/contributor-tutorial-contribute-on-github.rst:327 +#: ../../source/contributor-tutorial-contribute-on-github.rst:362 #, fuzzy msgid "PR title format" msgstr "PR 标题格式" -#: ../../source/contributor-tutorial-contribute-on-github.rst:329 +#: ../../source/contributor-tutorial-contribute-on-github.rst:364 #, fuzzy msgid "We enforce the following PR title format:" msgstr "我们执行以下 PR 标题格式:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:335 +#: ../../source/contributor-tutorial-contribute-on-github.rst:370 #, fuzzy msgid "" "(or ``(:skip) `` to ignore the PR in the " "changelog)" msgstr "(或 ``(:skip) `` 忽略更新日志中的 PR)。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:337 +#: ../../source/contributor-tutorial-contribute-on-github.rst:372 #, fuzzy msgid "" "Where ```` needs to be in ``{ci, fix, feat, docs, refactor, " @@ -2114,59 +2120,59 @@ msgstr "" "```` 应该使用 ``{framework, baselines, datasets, examples, 或者 '*' " "当修改多个项目时需要使用 ':skip'标记}``, 并且 ```` 应该以一个大写的动词开始。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:341 +#: ../../source/contributor-tutorial-contribute-on-github.rst:377 #, fuzzy msgid "Valid examples:" msgstr "实例" -#: ../../source/contributor-tutorial-contribute-on-github.rst:343 +#: ../../source/contributor-tutorial-contribute-on-github.rst:379 #, fuzzy msgid "``feat(framework) Add flwr build CLI command``" msgstr "`feat(框架) 添加 flwr build CLI 命令```" -#: ../../source/contributor-tutorial-contribute-on-github.rst:344 +#: ../../source/contributor-tutorial-contribute-on-github.rst:380 #, fuzzy msgid "``refactor(examples:skip) Improve quickstart-pytorch logging``" msgstr "``refactor(examples:skip) Improve quickstart-pytorch logging``." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:345 +#: ../../source/contributor-tutorial-contribute-on-github.rst:381 #, fuzzy msgid "``ci(*:skip) Enforce PR title format``" msgstr "`ci(*:skip)执行 PR 标题格式``。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:347 +#: ../../source/contributor-tutorial-contribute-on-github.rst:383 #, fuzzy msgid "Invalid examples:" msgstr "模拟示例" -#: ../../source/contributor-tutorial-contribute-on-github.rst:349 +#: ../../source/contributor-tutorial-contribute-on-github.rst:385 #, fuzzy msgid "``feat(framework): Add flwr build CLI command`` (extra ``:``)" msgstr "`feat(框架): 添加 flwr build CLI 命令``(额外的``:``)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:350 +#: ../../source/contributor-tutorial-contribute-on-github.rst:386 #, fuzzy msgid "" "``feat(*) Add flwr build CLI command`` (missing ``skip`` flag along with " "``*``)" msgstr "`feat(*)添加flwr构建CLI命令``(缺少``skip``标志和``*``)。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:351 +#: ../../source/contributor-tutorial-contribute-on-github.rst:387 #, fuzzy msgid "``feat(skip) Add flwr build CLI command`` (missing ````)" msgstr "`feat(skip)添加flwr构建CLI命令``(缺少```)。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:352 +#: ../../source/contributor-tutorial-contribute-on-github.rst:388 #, fuzzy msgid "``feat(framework) add flwr build CLI command`` (non capitalised verb)" msgstr "`feat(framework)添加 flwr 构建 CLI 命令``(非大写动词)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:353 +#: ../../source/contributor-tutorial-contribute-on-github.rst:389 #, fuzzy msgid "``feat(framework) Add flwr build CLI command.`` (dot at the end)" msgstr "feat(框架) 添加 flwr 构建 CLI 命令。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:354 +#: ../../source/contributor-tutorial-contribute-on-github.rst:390 #, fuzzy msgid "``Add flwr build CLI command.`` (missing ``()``)" msgstr "" @@ -2179,8 +2185,9 @@ msgstr "成为贡献者" #: 
../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 #: ../../source/docker/run-as-subprocess.rst:11 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:15 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:12 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:16 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:13 #: ../../source/docker/tutorial-quickstart-docker.rst:11 msgid "Prerequisites" msgstr "先决条件" @@ -2205,18 +2212,18 @@ msgstr "(可选) `pyenv-virtualenv #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:12 #, fuzzy msgid "" -"Flower uses :code:`pyproject.toml` to manage dependencies and configure " +"Flower uses ``pyproject.toml`` to manage dependencies and configure " "development tools (the ones which support it). Poetry is a build tool " "which supports `PEP 517 `_." msgstr "" "Flower 使用 :code:`pyproject.toml` 来管理依赖关系和配置开发工具(支持它的)。Poetry 是一种支持 `PEP " "517 `_ 的构建工具。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:18 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:17 msgid "Developer Machine Setup" msgstr "开发者机器设置" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:21 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:20 #, fuzzy msgid "Preliminaries" msgstr "前言" @@ -2238,110 +2245,110 @@ msgid "" "installation actions to add `brew` to your PATH." 
msgstr "安装 `homebrew `_。别忘了安装后的操作,将 `brew` 添加到你的 PATH。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:28 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:29 #, fuzzy msgid "" "Install `xz` (to install different Python versions) and `pandoc` to build" -" the docs::" +" the docs:" msgstr "安装 `xz`(用于安装不同的 Python 版本)和 `pandoc` 以构建文档::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:34 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:36 #, fuzzy msgid "For Ubuntu" msgstr "针对 Ubuntu" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:35 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:38 #, fuzzy msgid "" "Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all " -"necessary packages::" +"necessary packages:" msgstr "确保您的系统(Ubuntu 22.04+)为最新版本,并安装了所有必要的软件包::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:44 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:47 #, fuzzy msgid "Create Flower Dev Environment" msgstr "创建/删除虚拟环境" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:46 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:49 #, fuzzy msgid "" -"1. Clone the `Flower repository `_ from " -"GitHub::" +"Clone the `Flower repository `_ from " +"GitHub:" msgstr "首先,从 GitHub 克隆 \"Flower 存储库 `_\":" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:52 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:56 #, fuzzy msgid "" "Let's create the Python environment for all-things Flower. If you wish to" -" use :code:`pyenv`, we provide two convenience scripts that you can use. " -"If you prefer using something else than :code:`pyenv`, create a new " -"environment, activate and skip to the last point where all packages are " -"installed." +" use ``pyenv``, we provide two convenience scripts that you can use. 
If " +"you prefer using something else than ``pyenv``, create a new environment," +" activate and skip to the last point where all packages are installed." msgstr "" "让我们为 Flower 创建一个 Python 环境。如果您想使用 :code:`pyenv`,我们提供了两个方便的脚本供您使用。如果你不喜欢使用" " :code:`pyenv`,请创建一个新环境,激活并跳到最后一点,即安装所有软件包。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:54 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:61 #, fuzzy msgid "" -"If you don't have :code:`pyenv` installed, the following script that will" -" install it, set it up, and create the virtual environment (with " -":code:`Python 3.9.20` by default)::" +"If you don't have ``pyenv`` installed, the following script that will " +"install it, set it up, and create the virtual environment (with ``Python " +"3.9.20`` by default):" msgstr "" "如果没有安装 :code:`pyenv`,可以使用以下脚本安装 pyenv、设置并创建虚拟环境(默认使用 :code:`Python " "3.9.20)::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:58 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:68 #, fuzzy msgid "" -"If you already have :code:`pyenv` installed (along with the :code:`pyenv-" -"virtualenv` plugin), you can use the following convenience script (with " -":code:`Python 3.9.20` by default)::" +"If you already have ``pyenv`` installed (along with the ``pyenv-" +"virtualenv`` plugin), you can use the following convenience script (with " +"``Python 3.9.20`` by default):" msgstr "" "如果没有安装 :code:`pyenv`,可以使用以下脚本安装 pyenv、设置并创建虚拟环境(默认使用 :code:`Python " "3.9.20)::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:62 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:75 #, fuzzy msgid "" -"3. Install the Flower package in development mode (think :code:`pip " -"install -e`) along with all necessary dependencies::" +"3. 
Install the Flower package in development mode (think ``pip install " +"-e``) along with all necessary dependencies:" msgstr "第三,在开发模式下安装 Flower 软件包(想想 :code:`pip install -e`)以及所有必要的依赖项::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:83 msgid "Convenience Scripts" msgstr "便捷脚本" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:71 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 +#, fuzzy msgid "" "The Flower repository contains a number of convenience scripts to make " -"recurring development tasks easier and less error-prone. See the " -":code:`/dev` subdirectory for a full list. The following scripts are " -"amongst the most important ones:" +"recurring development tasks easier and less error-prone. See the ``/dev``" +" subdirectory for a full list. The following scripts are amongst the most" +" important ones:" msgstr "Flower 软件仓库包含大量便捷脚本,可使重复性开发任务更轻松、更不易出错。完整列表请参见 :code:`/dev` 子目录。以下是最重要的脚本:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:90 msgid "Create/Delete Virtual Environment" msgstr "创建/删除虚拟环境" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:98 msgid "Compile ProtoBuf Definitions" msgstr "编译 ProtoBuf 定义" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:105 msgid "Auto-Format Code" msgstr "自动格式化代码" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:99 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:112 msgid "Run Linters and Tests" msgstr "运行分类器和测试" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:106 +#: 
../../source/contributor-tutorial-get-started-as-a-contributor.rst:119 #, fuzzy msgid "Add a pre-commit hook" msgstr "添加预先提交钩子" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:121 #, fuzzy msgid "" "Developers may integrate a pre-commit hook into their workflow utilizing " @@ -2353,31 +2360,31 @@ msgstr "" "库将预提交钩子集成到工作流程中。预提交钩子被配置为执行两个主要操作: `./dev/format.sh`` 和 ``./dev/test.sh``" " 脚本。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:110 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:125 #, fuzzy msgid "There are multiple ways developers can use this:" msgstr "开发人员可以通过多种方式使用它:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:112 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:127 #, fuzzy msgid "Install the pre-commit hook to your local git directory by simply running:" msgstr "在本地 git 目录中安装预提交钩子,只需运行" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:118 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:133 #, fuzzy msgid "" "Each ``git commit`` will trigger the execution of formatting and " "linting/test scripts." msgstr "每次 \"git 提交 \"都会触发格式化和内核/测试脚本的执行。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:135 #, fuzzy msgid "" "If in a hurry, bypass the hook using ``--no-verify`` with the ``git " -"commit`` command. ::" +"commit`` command." 
msgstr "如果赶时间,可使用 ``--no-verify`` 和 ``git commit` 命令绕过钩子:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142 #, fuzzy msgid "" "For developers who prefer not to install the hook permanently, it is " @@ -2385,64 +2392,67 @@ msgid "" " the following command:" msgstr "对于不想永久安装钩子的开发人员,可以使用以下命令在提交更改之前执行一次性检查:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:149 #, fuzzy msgid "" "This executes the formatting and linting checks/tests on all the files " "without modifying the default behavior of ``git commit``." msgstr "这将在不修改 ``git commit`` 默认行为的情况下对所有文件执行格式化和词排检查/测试。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:133 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:153 msgid "Run Github Actions (CI) locally" msgstr "在本地运行 Github 操作 (CI)" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:135 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:155 #, fuzzy msgid "" "Developers could run the full set of Github Actions workflows under their" " local environment by using `Act `_. " "Please refer to the installation instructions under the linked repository" -" and run the next command under Flower main cloned repository folder::" +" and run the next command under Flower main cloned repository folder:" msgstr "" "开发人员可以使用 `Act _` 在本地环境下运行全套 Github Actions" " 工作流程。请参考链接仓库下的安装说明,并在 Flower 主克隆仓库文件夹下运行下一条命令::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:164 msgid "" "The Flower default workflow would run by setting up the required Docker " "machines underneath." 
msgstr "Flower 默认工作流程将通过在下面设置所需的 Docker 机器来运行。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:147 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:168 msgid "Build Release" msgstr "版本发布" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:149 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:170 +#, fuzzy msgid "" "Flower uses Poetry to build releases. The necessary command is wrapped in" -" a simple script::" +" a simple script:" msgstr "Flower 使用 Poetry 创建发布版本。必要的命令封装在一个简单的脚本中::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:154 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:177 +#, fuzzy msgid "" -"The resulting :code:`.whl` and :code:`.tar.gz` releases will be stored in" -" the :code:`/dist` subdirectory." +"The resulting ``.whl`` and ``.tar.gz`` releases will be stored in the " +"``/dist`` subdirectory." msgstr "生成的 :code:`.whl` 和 :code:`.tar.gz` 版本将存储在 :code:`/dist` 子目录中。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:159 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:181 msgid "Build Documentation" msgstr "构建文档" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:161 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:183 +#, fuzzy msgid "" "Flower's documentation uses `Sphinx `_. " "There's no convenience script to re-build the documentation yet, but it's" -" pretty easy::" +" pretty easy:" msgstr "" "Flower 的文档使用 `Sphinx `_。目前还没有很方便的脚本来重新构建文档,不过这很容易::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:191 msgid "This will generate HTML documentation in ``doc/build/html``." 
msgstr "这将在 ``doc/build/html`` 中生成 HTML 文档。" @@ -2490,7 +2500,7 @@ msgid "" msgstr "" #: ../../source/docker/enable-tls.rst:23 -#: ../../source/docker/persist-superlink-state.rst:14 +#: ../../source/docker/persist-superlink-state.rst:15 msgid "" "If you later want to delete the directory, you can change the user ID " "back to the current user ID by running ``sudo chown -R $USER:$(id -gn) " @@ -2514,22 +2524,22 @@ msgstr "" msgid "Understanding the command" msgstr "训练模型" -#: ../../source/docker/enable-tls.rst:44 ../../source/docker/enable-tls.rst:91 +#: ../../source/docker/enable-tls.rst:45 ../../source/docker/enable-tls.rst:92 #: ../../source/docker/enable-tls.rst:125 #: ../../source/docker/tutorial-quickstart-docker.rst:66 #: ../../source/docker/tutorial-quickstart-docker.rst:103 -#: ../../source/docker/tutorial-quickstart-docker.rst:213 -#: ../../source/docker/tutorial-quickstart-docker.rst:300 +#: ../../source/docker/tutorial-quickstart-docker.rst:217 +#: ../../source/docker/tutorial-quickstart-docker.rst:305 #, fuzzy msgid "``docker run``: This tells Docker to run a container from an image." msgstr "`docker run``: 这是运行新 Docker 容器的命令。" -#: ../../source/docker/enable-tls.rst:45 ../../source/docker/enable-tls.rst:92 +#: ../../source/docker/enable-tls.rst:46 ../../source/docker/enable-tls.rst:93 #: ../../source/docker/enable-tls.rst:126 #: ../../source/docker/tutorial-quickstart-docker.rst:67 #: ../../source/docker/tutorial-quickstart-docker.rst:104 -#: ../../source/docker/tutorial-quickstart-docker.rst:214 -#: ../../source/docker/tutorial-quickstart-docker.rst:301 +#: ../../source/docker/tutorial-quickstart-docker.rst:218 +#: ../../source/docker/tutorial-quickstart-docker.rst:306 msgid "``--rm``: Remove the container once it is stopped or the command exits." msgstr "" @@ -2634,12 +2644,12 @@ msgstr "" msgid "the network." 
msgstr "" -#: ../../source/docker/enable-tls.rst:71 +#: ../../source/docker/enable-tls.rst:72 #, fuzzy msgid "SuperNode" msgstr "flower-superlink" -#: ../../source/docker/enable-tls.rst:73 +#: ../../source/docker/enable-tls.rst:74 #, fuzzy msgid "" "Assuming that the ``ca.crt`` certificate already exists locally, we can " @@ -2649,7 +2659,7 @@ msgstr "" "假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 ``-v`` 将本地目录挂载到容器的 " "``/app/`` 目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` 标志将证书名称传递给服务器。" -#: ../../source/docker/enable-tls.rst:78 +#: ../../source/docker/enable-tls.rst:79 msgid "" "If you're generating self-signed certificates and the ``ca.crt`` " "certificate doesn't exist on the SuperNode, you can copy it over after " @@ -2765,16 +2775,16 @@ msgstr "" msgid "Getting Started" msgstr "开始" -#: ../../source/docker/index.rst:20 +#: ../../source/docker/index.rst:19 msgid "Running in Production" msgstr "" -#: ../../source/docker/index.rst:29 +#: ../../source/docker/index.rst:28 #, fuzzy msgid "Advanced Options" msgstr "高级安装选项" -#: ../../source/docker/index.rst:41 +#: ../../source/docker/index.rst:40 #, fuzzy msgid "Run Flower using Docker Compose" msgstr "使用 Docker 运行 Flower" @@ -2800,7 +2810,7 @@ msgid "" " on your host system and a name for the database file." msgstr "" -#: ../../source/docker/persist-superlink-state.rst:10 +#: ../../source/docker/persist-superlink-state.rst:11 msgid "" "By default, the SuperLink container runs with a non-root user called " "``app`` with the user ID ``49999``. It is recommended to create a new " @@ -2808,7 +2818,7 @@ msgid "" "the mounted directory has the proper permissions." 
msgstr "" -#: ../../source/docker/persist-superlink-state.rst:20 +#: ../../source/docker/persist-superlink-state.rst:21 #, fuzzy msgid "" "In the example below, we create a new directory called ``state``, change " @@ -2821,7 +2831,7 @@ msgstr "" " Docker 将用户的主目录(主机上的 ``~/``)挂载到容器的 ``/app/`` 目录中。此外,我们使用标志 ``--database``" " 来指定数据库文件的名称。" -#: ../../source/docker/persist-superlink-state.rst:35 +#: ../../source/docker/persist-superlink-state.rst:36 #, fuzzy msgid "" "As soon as the SuperLink starts, the file ``state.db`` is created in the " @@ -2853,18 +2863,18 @@ msgstr "" "我们可能会更新标签后面的图像。此类更新通常包括系统依赖项的安全更新,不会改变 Flower " "的功能。不过,如果您想确保始终使用同一张图片,可以指定图片的哈希值而不是标签。" -#: ../../source/docker/pin-version.rst:13 +#: ../../source/docker/pin-version.rst:14 #, fuzzy msgid "" "The following command returns the current image digest referenced by the " ":substitution-code:`superlink:|stable_flwr_version|` tag:" msgstr "下面的命令将返回由 ``server:1.7.0-py3.11-ubuntu22.04`` 标记引用的当前图像哈希值:" -#: ../../source/docker/pin-version.rst:22 +#: ../../source/docker/pin-version.rst:23 msgid "This will output" msgstr "" -#: ../../source/docker/pin-version.rst:29 +#: ../../source/docker/pin-version.rst:30 #, fuzzy msgid "Next, we can pin the digest when running a new SuperLink container:" msgstr "接下来,我们可以在运行新服务器容器时将哈希值固定下来:" @@ -2912,7 +2922,7 @@ msgid "" "``USER root`` directive within your Dockerfile." 
msgstr "" -#: ../../source/docker/run-as-root-user.rst:29 +#: ../../source/docker/run-as-root-user.rst:30 #, fuzzy msgid "SuperNode Dockerfile" msgstr "创建超级节点 Dockerfile" @@ -2939,12 +2949,12 @@ msgid "" "done by extending the SuperNode image:" msgstr "" -#: ../../source/docker/run-as-subprocess.rst:16 +#: ../../source/docker/run-as-subprocess.rst:17 #, fuzzy msgid "Dockerfile.supernode" msgstr "Flower 服务器" -#: ../../source/docker/run-as-subprocess.rst:30 +#: ../../source/docker/run-as-subprocess.rst:31 #, fuzzy msgid "" "Next, build the SuperNode Docker image by running the following command " @@ -2976,82 +2986,83 @@ msgid "" " Engine via Docker Compose." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:12 msgid "" "Some quickstart examples may have limitations or requirements that " "prevent them from running on every environment. For more information, " -"please see `Limitations`_." +"please see Limitations_." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:17 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:14 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:15 #: ../../source/docker/tutorial-quickstart-docker.rst:13 #, fuzzy msgid "Before you start, make sure that:" msgstr "开始之前,请确保 Docker 守护进程正在运行:" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:19 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:16 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 #: ../../source/docker/tutorial-quickstart-docker.rst:15 msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 #: ../../source/docker/tutorial-quickstart-docker.rst:16 #, fuzzy msgid "The Docker daemon is running." msgstr "验证 Docker 守护进程是否正在运行。" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:19 msgid "Docker Compose is `installed `_." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:24 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:25 #, fuzzy msgid "Run the Quickstart Example" msgstr "示例请求" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:26 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:27 msgid "" "Clone the quickstart example you like to run. 
For example, ``quickstart-" "pytorch``:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:34 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:35 msgid "" "Download the `compose.yml " "`_" " file into the example directory:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:41 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:44 #, fuzzy msgid "Build and start the services using the following command:" msgstr "运行以下命令激活 virtualenv:" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:47 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:50 #, fuzzy msgid "" "Append the following lines to the end of the ``pyproject.toml`` file and " "save it:" msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:49 -#: ../../source/docker/tutorial-quickstart-docker.rst:319 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:52 +#: ../../source/docker/tutorial-quickstart-docker.rst:324 #, fuzzy msgid "pyproject.toml" msgstr "或 ``pyproject.toml```:" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:58 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 msgid "" "You can customize the string that follows ``tool.flwr.federations.`` to " "fit your needs. However, please note that the string cannot contain a dot" " (``.``)." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:64 msgid "" "In this example, ``local-deployment`` has been used. Just remember to " "replace ``local-deployment`` with your chosen name in both the " @@ -3059,57 +3070,57 @@ msgid "" "command." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:65 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:68 #, fuzzy msgid "Run the example:" msgstr "将示例联邦化" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:71 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:74 msgid "Follow the logs of the SuperExec service:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:77 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 msgid "" "That is all it takes! You can monitor the progress of the run through the" " logs of the SuperExec." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:84 msgid "Run a Different Quickstart Example" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:82 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:86 msgid "" "To run a different quickstart example, such as ``quickstart-tensorflow``," " first, shut down the Docker Compose services of the current example:" msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:89 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:93 msgid "After that, you can repeat the steps above." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:92 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:98 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:96 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 #, fuzzy msgid "Limitations" msgstr "运行模拟" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:97 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 #, fuzzy msgid "Quickstart Example" msgstr "快速入门 JAX" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:99 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 #, fuzzy msgid "quickstart-fastai" msgstr "快速入门 fastai" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:100 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:123 #: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 #: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 #: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 @@ -3117,83 +3128,83 @@ msgstr "快速入门 fastai" msgid "None" msgstr "无" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 +#: 
../../source/docker/run-quickstart-examples-docker-compose.rst:105 #, fuzzy msgid "quickstart-huggingface" msgstr "快速入门教程" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 #, fuzzy msgid "quickstart-jax" msgstr "快速入门 JAX" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:125 #, fuzzy msgid "" "The example has not yet been updated to work with the latest ``flwr`` " "version." msgstr "涵盖 scikit-learn 和 PyTorch Lightning 的代码示例已更新,以便与最新版本的 Flower 配合使用。" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 #, fuzzy msgid "quickstart-mlcube" msgstr "快速入门 JAX" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 #, fuzzy msgid "quickstart-mlx" msgstr "快速入门 JAX" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 msgid "" "`Requires to run on macOS with Apple Silicon `_." 
msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 #, fuzzy msgid "quickstart-monai" msgstr "快速入门 JAX" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 #, fuzzy msgid "quickstart-pandas" msgstr "快速入门Pandas" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:113 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 #, fuzzy msgid "quickstart-pytorch-lightning" msgstr "快速入门 PyTorch Lightning" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 msgid "" "Requires an older pip version that is not supported by the Flower Docker " "images." msgstr "" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 #, fuzzy msgid "quickstart-pytorch" msgstr "PyTorch快速入门" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 #, fuzzy msgid "quickstart-sklearn-tabular" msgstr "scikit-learn快速入门" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:124 #, fuzzy msgid "quickstart-tabnet" msgstr "快速入门 JAX" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:126 #, fuzzy msgid "quickstart-tensorflow" msgstr "快速入门 TensorFlow" -#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:127 msgid "Only runs on AMD64." msgstr "" @@ -3210,6 +3221,209 @@ msgid "" "environment variables for a container." 
msgstr "要在 Docker 容器内设置变量,可以使用 ``-e =`` 标志。" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:2 +#, fuzzy +msgid "Deploy Flower on Multiple Machines with Docker Compose" +msgstr "快速入门 iOS" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:4 +msgid "" +"This guide will help you set up a Flower project on multiple machines " +"using Docker Compose." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:7 +msgid "" +"You will learn how to run the Flower client and server components on two " +"separate machines, with Flower configured to use TLS encryption and " +"persist SuperLink state across restarts. A server consists of a SuperLink" +" and ``SuperExec``. For more details about the Flower architecture, refer" +" to the :doc:`../explanation-flower-architecture` explainer page." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:13 +msgid "" +"This guide assumes you have completed the :doc:`tutorial-quickstart-" +"docker-compose` tutorial. It is highly recommended that you follow and " +"understand the contents of that tutorial before proceeding with this " +"guide." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:20 +msgid "Before you begin, make sure you have the following prerequisites:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:23 +msgid "The Docker daemon is running on your local machine and the remote machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:24 +msgid "" +"Docker Compose V2 is installed on both your local machine and the remote " +"machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:25 +msgid "You can connect to the remote machine from your local machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:26 +msgid "Ports ``9091`` and ``9093`` are accessible on the remote machine." 
+msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:30 +msgid "" +"The guide uses the |quickstart_sklearn_tabular|_ example as an example " +"project." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:32 +msgid "" +"If your project has a different name or location, please remember to " +"adjust the commands/paths accordingly." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:36 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker.rst:19 +msgid "Step 1: Set Up" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:38 +msgid "Clone the Flower repository and change to the ``distributed`` directory:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:45 +msgid "Get the IP address from the remote machine and save it for later." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:46 +msgid "" +"Use the ``certs.yml`` Compose file to generate your own self-signed " +"certificates. If you have certificates, you can continue with Step 2." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:51 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:221 +msgid "These certificates should be used only for development purposes." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:53 +msgid "" +"For production environments, you may have to use dedicated services to " +"obtain your certificates." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:56 +msgid "" +"First, set the environment variables ``SUPERLINK_IP`` and " +"``SUPEREXEC_IP`` with the IP address from the remote machine. 
For " +"example, if the IP is ``192.168.2.33``, execute:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:65 +msgid "Next, generate the self-signed certificates:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:72 +msgid "Step 2: Copy the Server Compose Files" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:74 +msgid "" +"Use the method that works best for you to copy the ``server`` directory, " +"the certificates, and your Flower project to the remote machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:77 +msgid "For example, you can use ``scp`` to copy the directories:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:87 +#, fuzzy +msgid "Step 3: Start the Flower Server Components" +msgstr "然后,我们启动服务器:" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:89 +msgid "" +"Log into the remote machine using ``ssh`` and run the following command " +"to start the SuperLink and SuperExec services:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:102 +msgid "" +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``server`` Docker Compose files." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:105 +msgid "Go back to your terminal on your local machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:108 +#, fuzzy +msgid "Step 4: Start the Flower Client Components" +msgstr "然后,我们启动服务器:" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:110 +msgid "" +"On your local machine, run the following command to start the client " +"components:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:120 +msgid "" +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``client`` Docker Compose files." 
+msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:124 +#, fuzzy +msgid "Step 5: Run Your Flower Project" +msgstr "Flower 服务器。" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:126 +msgid "" +"Specify the remote SuperExec IP addresses and the path to the root " +"certificate in the ``[tool.flwr.federations.remote-superexec]`` table in " +"the ``pyproject.toml`` file. Here, we have named our remote federation " +"``remote-superexec``:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:130 +#, fuzzy +msgid "examples/quickstart-sklearn-tabular/pyproject.toml" +msgstr "scikit-learn快速入门" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:139 +msgid "" +"The Path of the ``root-certificates`` should be relative to the location " +"of the ``pyproject.toml`` file." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:142 +msgid "To run the project, execute:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:148 +msgid "" +"That's it! With these steps, you've set up Flower on two separate " +"machines and are ready to start using it." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:152 +msgid "Step 6: Clean Up" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:154 +#, fuzzy +msgid "Shut down the Flower client components:" +msgstr "Flower 客户端。" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:161 +msgid "Shut down the Flower server components and delete the SuperLink state:" +msgstr "" + #: ../../source/docker/tutorial-quickstart-docker.rst:2 #, fuzzy msgid "Quickstart with Docker" @@ -3228,12 +3442,7 @@ msgid "" " understanding the basic workflow that uses the minimum configurations." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:21 -#: ../../source/docker/tutorial-quickstart-docker.rst:19 -msgid "Step 1: Set Up" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:31 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:32 #: ../../source/docker/tutorial-quickstart-docker.rst:21 msgid "Create a new Flower project (PyTorch):" msgstr "" @@ -3255,7 +3464,7 @@ msgstr "" msgid "Step 2: Start the SuperLink" msgstr "然后,我们启动服务器:" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:60 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:62 #: ../../source/docker/tutorial-quickstart-docker.rst:52 #, fuzzy msgid "Open your terminal and run:" @@ -3282,8 +3491,8 @@ msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst:71 #: ../../source/docker/tutorial-quickstart-docker.rst:108 -#: ../../source/docker/tutorial-quickstart-docker.rst:215 -#: ../../source/docker/tutorial-quickstart-docker.rst:304 +#: ../../source/docker/tutorial-quickstart-docker.rst:219 +#: ../../source/docker/tutorial-quickstart-docker.rst:309 msgid "" "``--network flwr-network``: Make the container join the network named " "``flwr-network``." @@ -3295,8 +3504,8 @@ msgstr "" #: ../../source/docker/tutorial-quickstart-docker.rst:73 #: ../../source/docker/tutorial-quickstart-docker.rst:110 -#: ../../source/docker/tutorial-quickstart-docker.rst:216 -#: ../../source/docker/tutorial-quickstart-docker.rst:306 +#: ../../source/docker/tutorial-quickstart-docker.rst:220 +#: ../../source/docker/tutorial-quickstart-docker.rst:311 msgid "" "``--detach``: Run the container in the background, freeing up the " "terminal." @@ -3414,13 +3623,13 @@ msgid "" "extends the ClientApp image and installs the required dependencies." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:148 +#: ../../source/docker/tutorial-quickstart-docker.rst:149 msgid "" "Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste " "the following code into it:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:150 +#: ../../source/docker/tutorial-quickstart-docker.rst:152 #, fuzzy msgid "Dockerfile.clientapp" msgstr "Flower 客户端。" @@ -3505,7 +3714,7 @@ msgstr "" msgid "the default command run when the container is started." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:184 +#: ../../source/docker/tutorial-quickstart-docker.rst:186 msgid "" "Note that `flwr `__ is already installed " "in the ``flwr/clientapp`` base image, so only other package dependencies " @@ -3514,14 +3723,14 @@ msgid "" "after it has been copied into the Docker image (see line 5)." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:189 +#: ../../source/docker/tutorial-quickstart-docker.rst:192 #, fuzzy msgid "" "Next, build the ClientApp Docker image by running the following command " "in the directory where the Dockerfile is located:" msgstr "接下来,我们在 Dockerfile 和 ServerApp 代码所在的目录下运行以下命令,构建 ServerApp Docker 镜像。" -#: ../../source/docker/tutorial-quickstart-docker.rst:198 +#: ../../source/docker/tutorial-quickstart-docker.rst:201 #, fuzzy msgid "" "The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. " @@ -3529,7 +3738,7 @@ msgid "" "them according to your requirements." msgstr "我们给图片命名为 ``flwr_serverapp``,标签为 ``0.0.1``。请记住,这里选择的值只是一个示例。您可以根据自己的需要进行更改。" -#: ../../source/docker/tutorial-quickstart-docker.rst:201 +#: ../../source/docker/tutorial-quickstart-docker.rst:205 #, fuzzy msgid "Start the first ClientApp container:" msgstr "使用虚拟客户端引擎" @@ -3551,35 +3760,35 @@ msgstr "" msgid "``supernode-1:9094``." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:222 +#: ../../source/docker/tutorial-quickstart-docker.rst:226 msgid "Start the second ClientApp container:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:233 +#: ../../source/docker/tutorial-quickstart-docker.rst:237 #, fuzzy msgid "Step 5: Start the SuperExec" msgstr "然后,我们启动服务器:" -#: ../../source/docker/tutorial-quickstart-docker.rst:235 +#: ../../source/docker/tutorial-quickstart-docker.rst:239 #, fuzzy msgid "" "The procedure for building and running a SuperExec image is almost " "identical to the ClientApp image." msgstr "构建和运行 ServerApp 映像的程序与 SuperNode 映像几乎完全相同。" -#: ../../source/docker/tutorial-quickstart-docker.rst:237 +#: ../../source/docker/tutorial-quickstart-docker.rst:242 msgid "" "Similar to the ClientApp image, you will need to create a Dockerfile that" " extends the SuperExec image and installs the required FAB dependencies." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:240 +#: ../../source/docker/tutorial-quickstart-docker.rst:245 msgid "" "Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste " "the following code in:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:242 +#: ../../source/docker/tutorial-quickstart-docker.rst:248 msgid "Dockerfile.superexec" msgstr "" @@ -3609,13 +3818,13 @@ msgstr "" msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:277 +#: ../../source/docker/tutorial-quickstart-docker.rst:283 msgid "" "Afterward, in the directory that holds the Dockerfile, execute this " "Docker command to build the SuperExec image:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:285 +#: ../../source/docker/tutorial-quickstart-docker.rst:290 #, fuzzy msgid "Start the SuperExec container:" msgstr "启动服务器" @@ -3630,7 +3839,7 @@ msgid "" "``http://localhost:9093``." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:305 +#: ../../source/docker/tutorial-quickstart-docker.rst:310 msgid "``--name superexec``: Assign the name ``superexec`` to the container." msgstr "" @@ -3651,80 +3860,80 @@ msgstr "" msgid "connect to the SuperLink running on port ``9091``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:315 +#: ../../source/docker/tutorial-quickstart-docker.rst:320 msgid "Step 6: Run the Quickstart Project" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:317 +#: ../../source/docker/tutorial-quickstart-docker.rst:322 #, fuzzy msgid "Add the following lines to the ``pyproject.toml``:" msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" -#: ../../source/docker/tutorial-quickstart-docker.rst:326 +#: ../../source/docker/tutorial-quickstart-docker.rst:331 msgid "Run the ``quickstart-docker`` project by executing the command:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:332 +#: ../../source/docker/tutorial-quickstart-docker.rst:337 msgid "Follow the SuperExec logs to track the execution of the run:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:339 +#: ../../source/docker/tutorial-quickstart-docker.rst:344 #, fuzzy msgid "Step 7: Update the Application" msgstr "步骤 3:自定义序列化" -#: ../../source/docker/tutorial-quickstart-docker.rst:341 +#: ../../source/docker/tutorial-quickstart-docker.rst:346 msgid "" -"Change the application code. For example, change the ``seed`` in " +"Change the application code. 
For example, change the ``seed`` in " "``quickstart_docker/task.py`` to ``43`` and save it:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:344 +#: ../../source/docker/tutorial-quickstart-docker.rst:349 #, fuzzy msgid "quickstart_docker/task.py" msgstr "快速入门Pandas" -#: ../../source/docker/tutorial-quickstart-docker.rst:351 +#: ../../source/docker/tutorial-quickstart-docker.rst:356 #, fuzzy msgid "Stop the current ClientApp containers:" msgstr "当前客户端属性。" -#: ../../source/docker/tutorial-quickstart-docker.rst:357 +#: ../../source/docker/tutorial-quickstart-docker.rst:362 #, fuzzy msgid "Rebuild the FAB and ClientApp image:" msgstr "加载数据" -#: ../../source/docker/tutorial-quickstart-docker.rst:363 +#: ../../source/docker/tutorial-quickstart-docker.rst:368 msgid "Launch two new ClientApp containers based on the newly built image:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:378 +#: ../../source/docker/tutorial-quickstart-docker.rst:383 msgid "Run the updated project:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:385 +#: ../../source/docker/tutorial-quickstart-docker.rst:390 msgid "Step 8: Clean Up" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:387 +#: ../../source/docker/tutorial-quickstart-docker.rst:392 msgid "Remove the containers and the bridge network:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:401 -#: ../../source/docker/tutorial-quickstart-docker.rst:399 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:408 +#: ../../source/docker/tutorial-quickstart-docker.rst:404 #, fuzzy msgid "Where to Go Next" msgstr "从哪里开始" -#: ../../source/docker/tutorial-quickstart-docker.rst:401 +#: ../../source/docker/tutorial-quickstart-docker.rst:406 msgid ":doc:`enable-tls`" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:402 +#: ../../source/docker/tutorial-quickstart-docker.rst:407 msgid ":doc:`persist-superlink-state`" msgstr "" -#: 
../../source/docker/tutorial-quickstart-docker.rst:403 +#: ../../source/docker/tutorial-quickstart-docker.rst:408 msgid ":doc:`tutorial-quickstart-docker-compose`" msgstr "" @@ -3747,180 +3956,180 @@ msgid "" "configuration that best suits your project's needs." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:23 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:24 msgid "Clone the Docker Compose ``complete`` directory:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:37 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:38 msgid "" "Export the path of the newly created project. The path should be relative" " to the location of the Docker Compose files:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:44 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:45 msgid "" "Setting the ``PROJECT_DIR`` helps Docker Compose locate the " "``pyproject.toml`` file, allowing it to install dependencies in the " "SuperExec and SuperNode images correctly." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:48 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:49 #, fuzzy msgid "Step 2: Run Flower in Insecure Mode" msgstr "Flower 服务器。" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:50 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:51 msgid "" "To begin, start Flower with the most basic configuration. In this setup, " "Flower will run without TLS and without persisting the state." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:55 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:56 msgid "" "Without TLS, the data sent between the services remains **unencrypted**. " "Use it only for development purposes." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:58 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:59 msgid "" "For production-oriented use cases, :ref:`enable TLS` for secure data" " transmission." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:68 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:179 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:184 #, fuzzy msgid "``docker compose``: The Docker command to run the Docker Compose tool." msgstr "`docker run``: 这是运行新 Docker 容器的命令。" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:69 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:180 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 msgid "" "``-f compose.yml``: Specify the YAML file that contains the basic Flower " "service definitions." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:72 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:190 msgid "" "``--build``: Rebuild the images for each service if they don't already " "exist." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:186 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:73 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:191 msgid "" "``-d``: Detach the containers from the terminal and run them in the " "background." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:74 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 msgid "Step 3: Run the Quickstart Project" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:78 msgid "" "Now that the Flower services have been started via Docker Compose, it is " "time to run the quickstart example." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:79 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:81 msgid "" "To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify" " the SuperExec addresses in the ``pyproject.toml`` file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:82 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:232 msgid "Add the following lines to the ``quickstart-compose/pyproject.toml``:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:228 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:86 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:234 msgid "quickstart-compose/pyproject.toml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:91 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:93 msgid "Execute the command to run the quickstart example:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:97 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:99 msgid "Monitor the SuperExec logs and wait for the summary to appear:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:104 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 #, fuzzy msgid "Step 4: Update 
the Application" msgstr "步骤 3:自定义序列化" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 msgid "In the next step, change the application code." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:110 msgid "" "For example, go to the ``task.py`` file in the ``quickstart-" "compose/quickstart_compose/`` directory and add a ``print`` call in the " "``get_weights`` function:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:111 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:114 msgid "quickstart-compose/quickstart_compose/task.py" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:120 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:125 #, fuzzy msgid "Rebuild and restart the services." msgstr "我们已经可以启动*服务器*了:" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:124 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 msgid "" "If you have modified the dependencies listed in your ``pyproject.toml`` " "file, it is essential to rebuild images." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:127 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:132 msgid "If you haven't made any changes, you can skip this step." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:134 msgid "Run the following command to rebuild and restart the services:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:135 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:140 msgid "Run the updated quickstart example:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:142 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:147 msgid "In the SuperExec logs, you should find the ``Get weights`` line:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:159 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:164 msgid "Step 5: Persisting the SuperLink State" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:161 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 msgid "" "In this step, Flower services are configured to persist the state of the " "SuperLink service, ensuring that it maintains its state even after a " "restart." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 msgid "" "When working with Docker Compose on Linux, you may need to create the " "``state`` directory first and change its ownership to ensure proper " "access and permissions." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:169 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:174 msgid "" "For more information, consult the following page: :doc:`persist-" "superlink-state`." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:220 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:176 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 msgid "Run the command:" msgstr "" @@ -3941,17 +4150,17 @@ msgid "" "rules>`_." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:188 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:193 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:247 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:375 msgid "Rerun the ``quickstart-compose`` project:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:194 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:199 msgid "Check the content of the ``state`` directory:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:201 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:206 msgid "" "You should see a ``state.db`` file in the ``state`` directory. If you " "restart the service, the state file will be used to restore the state " @@ -3959,125 +4168,121 @@ msgid "" "if the containers are stopped and started again." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:208 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:214 msgid "Step 6: Run Flower with TLS" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:210 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:216 msgid "" "To demonstrate how to enable TLS, generate self-signed certificates using" " the ``certs.yml`` Compose file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:215 -msgid "These certificates should be used only for development purposes." 
-msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:217 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:223 msgid "" "For production environments, use a service like `Let's Encrypt " "`_ to obtain your certificates." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:235 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 msgid "Restart the services with TLS enabled:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:249 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:255 msgid "Step 7: Add another SuperNode" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:251 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 msgid "" "You can add more SuperNodes and ClientApps by duplicating their " "definitions in the ``compose.yml`` file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:254 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:260 msgid "" "Just give each new SuperNode and ClientApp service a unique service name " "like ``supernode-3``, ``clientapp-3``, etc." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:263 msgid "In ``compose.yml``, add the following:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:259 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:265 msgid "compose.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:310 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:316 msgid "" "If you also want to enable TLS for the new SuperNodes, duplicate the " "SuperNode definition for each new SuperNode service in the ``with-" "tls.yml`` file." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:313 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:319 msgid "" "Make sure that the names of the services match with the one in the " "``compose.yml`` file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:315 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:321 msgid "In ``with-tls.yml``, add the following:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:317 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:323 msgid "with-tls.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:339 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:345 msgid "Step 8: Persisting the SuperLink State and Enabling TLS" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:341 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:347 msgid "" "To run Flower with persisted SuperLink state and enabled TLS, a slight " "change in the ``with-state.yml`` file is required:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:344 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:350 msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:346 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:352 msgid "with-state.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:363 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 #, fuzzy msgid "Restart the services:" msgstr "启动服务器" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:377 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:383 msgid "Step 9: Merge Multiple Compose Files" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:379 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:385 msgid "" 
"You can merge multiple Compose files into a single file. For instance, if" " you wish to combine the basic configuration with the TLS configuration, " "execute the following command:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:387 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:394 msgid "" "This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" " a new file called ``my_compose.yml``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:391 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:398 msgid "Step 10: Clean Up" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:393 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:400 #, fuzzy msgid "Remove all services and volumes:" msgstr "从 R 中删除所有项目。" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:403 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:410 #, fuzzy msgid ":doc:`run-quickstart-examples-docker-compose`" msgstr "快速入门 iOS" @@ -4097,7 +4302,7 @@ msgstr "" "如果您想使用不同版本的 Flower 或 Python,可以通过更改标签来实现。我们提供的所有版本都可以在 `Docker Hub " "`_ 上找到。" -#: ../../source/docker/use-a-different-version.rst:9 +#: ../../source/docker/use-a-different-version.rst:10 #, fuzzy msgid "" "When using Flower nightly, the SuperLink nightly image must be paired " @@ -4131,35 +4336,35 @@ msgstr "" "从集中式到联邦式 `_ 做少量改动。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:10 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:12 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 msgid "Centralized Training" msgstr "集中式训练" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:14 #, fuzzy msgid "" "All files are revised based on :doc:`Example: PyTorch - From 
Centralized " "To Federated `. The only " -"thing to do is modifying the file called :code:`cifar.py`, revised part " -"is shown below:" +"thing to do is modifying the file called ``cifar.py``, revised part is " +"shown below:" msgstr "" "所有文件均根据 `示例: PyTorch -从集中式到联邦式 `_。唯一要做的就是修改名为 :code:`cifar.py` " "的文件,修改部分如下所示:" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:18 msgid "" "The model architecture defined in class Net() is added with Batch " "Normalization layers accordingly." msgstr "类 Net() 中定义的模型架构会相应添加Batch Normalization层。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:41 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:157 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:171 msgid "You can now run your machine learning workload:" msgstr "现在,您可以运行您的机器学习工作了:" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 #, fuzzy msgid "" "So far this should all look fairly familiar if you've used PyTorch " @@ -4170,20 +4375,20 @@ msgstr "" "到目前为止,如果您以前使用过 PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,使用我们所构建的内容在 FedBN " "中创建一个联邦学习系统,该系统由一个服务器和两个客户端组成。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:167 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:58 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:182 msgid "Federated Training" msgstr "联邦培训" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:60 #, fuzzy msgid "" "If you have read :doc:`Example: PyTorch - From Centralized To Federated " "`, the following parts are" 
-" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " -"function in :code:`client.py` needed to revise. If not, please read the " -":doc:`Example: PyTorch - From Centralized To Federated `. first." +" easy to follow, only ``get_parameters`` and ``set_parameters`` function " +"in ``client.py`` needed to revise. If not, please read the :doc:`Example:" +" PyTorch - From Centralized To Federated `. first." msgstr "" "如果你读过 `示例: PyTorch - 从集中式到联邦式 `_,下面的部分就很容易理解了,只需要修改 " @@ -4191,41 +4396,43 @@ msgstr "" "函数。如果没有,请阅读 `示例: PyTorch - 从集中式到联邦式 `_。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:66 +#, fuzzy msgid "" "Our example consists of one *server* and two *clients*. In FedBN, " -":code:`server.py` keeps unchanged, we can start the server directly." +"``server.py`` keeps unchanged, we can start the server directly." msgstr "我们的示例包括一个*服务器*和两个*客户端*。在 FedBN 中,:code:`server.py` 保持不变,我们可以直接启动服务器。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:73 +#, fuzzy msgid "" -"Finally, we will revise our *client* logic by changing " -":code:`get_parameters` and :code:`set_parameters` in :code:`client.py`, " -"we will exclude batch normalization parameters from model parameter list " -"when sending to or receiving from the server." +"Finally, we will revise our *client* logic by changing ``get_parameters``" +" and ``set_parameters`` in ``client.py``, we will exclude batch " +"normalization parameters from model parameter list when sending to or " +"receiving from the server." 
msgstr "" "最后,我们将修改 *client* 的逻辑,修改 :code:`client.py` 中的 :code:`get_parameters` 和 " ":code:`set_parameters`,在向服务器发送或从服务器接收时,我们将从模型参数列表中排除batch " "normalization层的参数。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:102 msgid "Now, you can now open two additional terminal windows and run" msgstr "现在,您可以打开另外两个终端窗口并运行程序" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:108 msgid "" "in each window (make sure that the server is still running before you do " "so) and see your (previously centralized) PyTorch project run federated " "learning with FedBN strategy across two clients. Congratulations!" msgstr "确保服务器仍在运行后,然后您就能看到您的 PyTorch 项目(之前是集中式的)通过 FedBN 策略在两个客户端上运行联合学习。祝贺!" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:310 -#: ../../source/tutorial-quickstart-jax.rst:283 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:113 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:349 +#: ../../source/tutorial-quickstart-jax.rst:319 msgid "Next Steps" msgstr "下一步工作" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:115 msgid "" "The full source code for this example can be found `here " "`_\"教程,采用集中式训练方法介绍了这项机器学习任务。然后,我们在集中式训练代码的基础上以联邦方式运行训练。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:14 msgid "" "We begin with a brief description of the centralized CNN training code. 
" "If you want a more in-depth explanation of what's going on then have a " @@ -4269,57 +4476,62 @@ msgstr "" "我们首先简要介绍一下集中式 CNN 训练代码。如果您想获得更深入的解释,请参阅 PyTorch 官方教程`PyTorch tutorial " "`_。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:15 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:18 +#, fuzzy msgid "" -"Let's create a new file called :code:`cifar.py` with all the components " +"Let's create a new file called ``cifar.py`` with all the components " "required for a traditional (centralized) training on CIFAR-10. First, all" -" required packages (such as :code:`torch` and :code:`torchvision`) need " -"to be imported. You can see that we do not import any package for " -"federated learning. You can keep all these imports as they are even when " -"we add the federated learning components at a later point." +" required packages (such as ``torch`` and ``torchvision``) need to be " +"imported. You can see that we do not import any package for federated " +"learning. You can keep all these imports as they are even when we add the" +" federated learning components at a later point." msgstr "" "让我们创建一个名为 :code:`cifar.py` 的新文件,其中包含 CIFAR-10 " "传统(集中)培训所需的所有组件。首先,需要导入所有必需的软件包(如 :code:`torch` 和 " ":code:`torchvision`)。您可以看到,我们没有导入任何用于联邦学习的软件包。即使在以后添加联邦学习组件时,也可以保留所有这些导入。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:32 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:36 +#, fuzzy msgid "" "As already mentioned we will use the CIFAR-10 dataset for this machine " "learning workload. The model architecture (a very simple Convolutional " -"Neural Network) is defined in :code:`class Net()`." +"Neural Network) is defined in ``class Net()``." 
msgstr "" "如前所述,我们将使用 CIFAR-10 数据集进行机器学习。模型架构(一个非常简单的卷积神经网络)在 :code:`class Net()` " "中定义。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:62 +#, fuzzy msgid "" -"The :code:`load_data()` function loads the CIFAR-10 training and test " -"sets. The :code:`transform` normalized the data after loading." +"The ``load_data()`` function loads the CIFAR-10 training and test sets. " +"The ``transform`` normalized the data after loading." msgstr "" ":code:`load_data()` 函数加载 CIFAR-10 " "训练集和测试集。加载数据后,:code:`transform`函数对数据进行了归一化处理。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:74 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:84 +#, fuzzy msgid "" -"We now need to define the training (function :code:`train()`) which loops" -" over the training set, measures the loss, backpropagates it, and then " +"We now need to define the training (function ``train()``) which loops " +"over the training set, measures the loss, backpropagates it, and then " "takes one optimizer step for each batch of training examples." msgstr "现在,我们需要定义训练函数(:code:`train()`),该函数在训练集上循环训练,计算损失值并反向传播,然后为每批训练数据在优化器上执行一个优化步骤。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:76 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:88 +#, fuzzy msgid "" -"The evaluation of the model is defined in the function :code:`test()`. " -"The function loops over all test samples and measures the loss of the " -"model based on the test dataset." +"The evaluation of the model is defined in the function ``test()``. The " +"function loops over all test samples and measures the loss of the model " +"based on the test dataset." 
msgstr "模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并计算测试数据集的模型损失值。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:136 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:149 msgid "" "Having defined the data loading, model architecture, training, and " "evaluation we can put everything together and train our CNN on CIFAR-10." msgstr "在确定了数据加载、模型架构、训练和评估之后,我们就可以将所有整合在一起,在 CIFAR-10 上训练我们的 CNN。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:163 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:177 msgid "" "So far, this should all look fairly familiar if you've used PyTorch " "before. Let's take the next step and use what we've built to create a " @@ -4329,7 +4541,7 @@ msgstr "" "到目前为止,如果你以前用过 " "PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,利用我们所构建的内容创建一个简单联邦学习系统(由一个服务器和两个客户端组成)。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:169 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:184 msgid "" "The simple machine learning project discussed in the previous section " "trains the model on a single dataset (CIFAR-10), we call this centralized" @@ -4340,17 +4552,18 @@ msgid "" "everything up from scratch. This can be a considerable effort." msgstr "上一节讨论的简单机器学习项目在单一数据集(CIFAR-10)上训练模型,我们称之为集中学习。如上一节所示,集中学习的概念可能为大多数人所熟知,而且很多人以前都使用过。通常情况下,如果要以联邦方式运行机器学习工作,就必须更改大部分代码,并从头开始设置一切。这可能是一个相当大的工作量。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:173 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 msgid "" "However, with Flower you can evolve your pre-existing code into a " "federated learning setup without the need for a major rewrite." msgstr "不过,有了 Flower,您可以轻松地将已有的代码转变成联邦学习的模式,无需进行大量重写。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:175 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:194 +#, fuzzy msgid "" "The concept is easy to understand. 
We have to start a *server* and then " -"use the code in :code:`cifar.py` for the *clients* that are connected to " -"the *server*. The *server* sends model parameters to the clients. The " +"use the code in ``cifar.py`` for the *clients* that are connected to the " +"*server*. The *server* sends model parameters to the clients. The " "*clients* run the training and update the parameters. The updated " "parameters are sent back to the *server* which averages all received " "parameter updates. This describes one round of the federated learning " @@ -4359,45 +4572,47 @@ msgstr "" "这个概念很容易理解。我们必须启动一个*服务器*,然后对连接到*服务器*的*客户端*使用 " ":code:`cifar.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后会对所有收到的参数更新进行平均聚合。以上描述的是一轮联邦学习过程,我们将重复进行多轮学习。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:181 -#: ../../source/tutorial-quickstart-jax.rst:129 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:201 +#: ../../source/tutorial-quickstart-jax.rst:147 +#, fuzzy msgid "" "Our example consists of one *server* and two *clients*. Let's set up " -":code:`server.py` first. The *server* needs to import the Flower package " -":code:`flwr`. Next, we use the :code:`start_server` function to start a " -"server and tell it to perform three rounds of federated learning." +"``server.py`` first. The *server* needs to import the Flower package " +"``flwr``. Next, we use the ``start_server`` function to start a server " +"and tell it to perform three rounds of federated learning." 
msgstr "" "我们的示例包括一个*服务器*和两个*客户端*。让我们先设置 :code:`server.py`。*服务器*需要导入 Flower 软件包 " ":code:`flwr`。接下来,我们使用 :code:`start_server` 函数启动服务器,并让它执行三轮联邦学习。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 -#: ../../source/tutorial-quickstart-jax.rst:139 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:215 +#: ../../source/tutorial-quickstart-jax.rst:161 msgid "We can already start the *server*:" msgstr "我们已经可以启动*服务器*了:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:197 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 +#, fuzzy msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined centralized training in :code:`cifar.py`. " -"Our *client* needs to import :code:`flwr`, but also :code:`torch` to " -"update the parameters on our PyTorch model:" +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined centralized training in ``cifar.py``. Our " +"*client* needs to import ``flwr``, but also ``torch`` to update the " +"parameters on our PyTorch model:" msgstr "" "最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 :code:`cifar.py` " "中定义的集中式训练为基础。我们的 *client* 不仅需要导入 :code:`flwr`,还需要导入 :code:`torch`,以更新 " "PyTorch 模型的参数:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:213 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:238 +#, fuzzy msgid "" "Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" -" to implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " -"some of the boilerplate that would otherwise be necessary. 
" -":code:`CifarClient` needs to implement four methods, two methods for " -"getting/setting model parameters, one method for training the model, and " -"one method for testing the model:" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``CifarClient``. ``NumPyClient`` is slightly easier to implement " +"than ``Client`` if you use a framework with good NumPy interoperability " +"(like PyTorch or TensorFlow/Keras) because it avoids some of the " +"boilerplate that would otherwise be necessary. ``CifarClient`` needs to " +"implement four methods, two methods for getting/setting model parameters," +" one method for training the model, and one method for testing the model:" msgstr "" "实现 Flower *client*基本上意味着实现 :code:`flwr.client.Client` 或 " ":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " @@ -4406,115 +4621,121 @@ msgstr "" ":code:`Client`略微容易一些,因为它避免了一些不必要的操作。:code:`CifarClient` " "需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -msgid ":code:`set_parameters`" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#, fuzzy +msgid "``set_parameters``" msgstr ":code:`set_parameters`" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -#: ../../source/tutorial-quickstart-jax.rst:166 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:248 +#: ../../source/tutorial-quickstart-jax.rst:192 msgid "" "set the model parameters on the local model that are received from the " "server" msgstr "在本地模型上设置从服务器接收的模型参数" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:220 -#: ../../source/tutorial-quickstart-jax.rst:168 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#: ../../source/tutorial-quickstart-jax.rst:194 +#, fuzzy msgid "" -"loop over the list of model parameters received as NumPy " -":code:`ndarray`'s (think 
list of neural network layers)" +"loop over the list of model parameters received as NumPy ``ndarray``'s " +"(think list of neural network layers)" msgstr "循环遍历以 NumPy :code:`ndarray` 形式接收的模型参数列表(可以看作神经网络的列表)" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 -#: ../../source/tutorial-quickstart-jax.rst:169 -#: ../../source/tutorial-quickstart-scikitlearn.rst:118 -msgid ":code:`get_parameters`" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 +#: ../../source/tutorial-quickstart-scikitlearn.rst:129 +#, fuzzy +msgid "``get_parameters``" msgstr ":code:`get_parameters`" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:222 -#: ../../source/tutorial-quickstart-jax.rst:170 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 +#, fuzzy msgid "" -"get the model parameters and return them as a list of NumPy " -":code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" +"get the model parameters and return them as a list of NumPy ``ndarray``'s" +" (which is what ``flwr.client.NumPyClient`` expects)" msgstr "" "获取模型参数,并以 NumPy :code:`ndarray`的列表形式返回(这正是 " ":code:`flwr.client.NumPyClient`所匹配的格式)" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid ":code:`fit`" -msgstr ":code:`fit`" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 +msgid "``fit``" +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:224 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:228 -#: ../../source/tutorial-quickstart-jax.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:176 +#: 
../../source/example-pytorch-from-centralized-to-federated.rst:255 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:260 +#: ../../source/tutorial-quickstart-jax.rst:200 +#: ../../source/tutorial-quickstart-jax.rst:205 msgid "" "update the parameters of the local model with the parameters received " "from the server" msgstr "用从服务器接收到的参数更新本地模型的参数" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 msgid "train the model on the local training set" msgstr "在本地训练集上训练模型" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:226 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:258 msgid "get the updated local model weights and return them to the server" msgstr "获取更新后的本地模型参数并发送回服务器" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -#: ../../source/tutorial-quickstart-jax.rst:178 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid ":code:`evaluate`" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 +#: ../../source/tutorial-quickstart-jax.rst:208 +#: ../../source/tutorial-quickstart-scikitlearn.rst:139 +#, fuzzy +msgid "``evaluate``" msgstr ":code:`evaluate`" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:229 -#: ../../source/tutorial-quickstart-jax.rst:177 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:262 +#: ../../source/tutorial-quickstart-jax.rst:207 msgid "evaluate the updated model on the local test set" msgstr "在本地测试集上评估更新后的模型" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 msgid "return the local loss and accuracy to the server" msgstr "向服务器返回本地损失值和精确度" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:232 +#: 
../../source/example-pytorch-from-centralized-to-federated.rst:265 +#, fuzzy msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`test()` previously " -"defined in :code:`cifar.py`. So what we really do here is we tell Flower " -"through our :code:`NumPyClient` subclass which of our already defined " -"functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``test()`` previously defined in ``cifar.py``. " +"So what we really do here is we tell Flower through our ``NumPyClient`` " +"subclass which of our already defined functions to call for training and " +"evaluation. We included type annotations to give you a better " +"understanding of the data types that get passed around." msgstr "" "这两个 :code:`NumPyClient` 中的方法 :code:`fit` 和 :code:`evaluate` 使用了之前在 " ":code:`cifar.py` 中定义的函数 :code:`train()` 和 :code:`test()`。因此,我们在这里要做的就是通过 " ":code:`NumPyClient` 子类告知 Flower " "在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让你更好地理解传递的数据类型。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:280 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:315 +#, fuzzy msgid "" "All that's left to do it to define a function that loads both model and " -"data, creates a :code:`CifarClient`, and starts this client. You load " -"your data and model by using :code:`cifar.py`. Start :code:`CifarClient` " -"with the function :code:`fl.client.start_client()` by pointing it at the " -"same IP address we used in :code:`server.py`:" +"data, creates a ``CifarClient``, and starts this client. You load your " +"data and model by using ``cifar.py``. 
Start ``CifarClient`` with the " +"function ``fl.client.start_client()`` by pointing it at the same IP " +"address we used in ``server.py``:" msgstr "剩下的就是定义模型和数据加载函数了。创建一个:code:`CifarClient`类,并运行这个客服端。您将通过:code:`cifar.py`加载数据和模型。另外,通过:code:`fl.client.start_client()`函数来运行客户端:code:`CifarClient`,需要保证IP地址和:code:`server.py`中所使用的一致:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:301 -#: ../../source/tutorial-quickstart-jax.rst:274 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:338 +#: ../../source/tutorial-quickstart-jax.rst:309 msgid "And that's it. You can now open two additional terminal windows and run" msgstr "就是这样,现在你可以打开另外两个终端窗口,然后运行" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:307 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:344 msgid "" "in each window (make sure that the server is running before you do so) " "and see your (previously centralized) PyTorch project run federated " "learning across two clients. Congratulations!" msgstr "确保服务器正在运行后,您就能看到您的 PyTorch 项目(之前是集中式的)在两个客户端上运行联邦学习了。祝贺!" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:312 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:351 msgid "" "The full source code for this example: `PyTorch: From Centralized To " "Federated (Code) `_了解更多信息。" -#: ../../source/how-to-authenticate-supernodes.rst:15 +#: ../../source/how-to-authenticate-supernodes.rst:20 #, fuzzy msgid "" "This guide covers a preview feature that might change in future versions " "of Flower." msgstr "本指南涵盖的预览功能可能会在 Flower 的未来版本中有所改变。" -#: ../../source/how-to-authenticate-supernodes.rst:18 +#: ../../source/how-to-authenticate-supernodes.rst:24 #, fuzzy msgid "" "For increased security, node authentication can only be used when " "encrypted connections (SSL/TLS) are enabled." 
msgstr "为提高安全性,只有启用加密连接(SSL/TLS)时才能使用节点验证。" -#: ../../source/how-to-authenticate-supernodes.rst:21 +#: ../../source/how-to-authenticate-supernodes.rst:28 #, fuzzy -msgid "Enable node authentication in :code:`SuperLink`" +msgid "Enable node authentication in ``SuperLink``" msgstr "在 :code:`SuperLink` 中启用节点验证" -#: ../../source/how-to-authenticate-supernodes.rst:23 +#: ../../source/how-to-authenticate-supernodes.rst:30 #, fuzzy msgid "" "To enable node authentication, first you need to configure SSL/TLS " "connections to secure the SuperLink<>SuperNode communication. You can " "find the complete guide `here `_. After configuring secure connections, you" -" can enable client authentication in a long-running Flower " -":code:`SuperLink`. Use the following terminal command to start a Flower " -":code:`SuperNode` that has both secure connections and node " -"authentication enabled:" +" can enable client authentication in a long-running Flower ``SuperLink``." +" Use the following terminal command to start a Flower ``SuperNode`` that " +"has both secure connections and node authentication enabled:" msgstr "" "要启用节点验证,首先需要配置 SSL/TLS 连接,以确保 SuperLink<>SuperNode 通信的安全。您可以在 " "`_ " @@ -5871,23 +6093,23 @@ msgstr "" ":code:`SuperLink`中启用客户端身份验证。使用以下终端命令启动一个同时启用了安全连接和节点验证的 Flower " ":code:`SuperNode`:" -#: ../../source/how-to-authenticate-supernodes.rst:38 +#: ../../source/how-to-authenticate-supernodes.rst:47 #, fuzzy msgid "Let's break down the authentication flags:" msgstr "让我们来分析一下身份验证标志:" -#: ../../source/how-to-authenticate-supernodes.rst:40 +#: ../../source/how-to-authenticate-supernodes.rst:49 #, fuzzy msgid "" -"The first flag :code:`--auth-list-public-keys` expects a path to a CSV " -"file storing all known node public keys. You need to store all known node" -" public keys that are allowed to participate in a federation in one CSV " -"file (:code:`.csv`)." +"The first flag ``--auth-list-public-keys`` expects a path to a CSV file " +"storing all known node public keys. 
You need to store all known node " +"public keys that are allowed to participate in a federation in one CSV " +"file (``.csv``)." msgstr "" "第一个标志 :code:`--auth-list-public-keys`(密码:`--auth-list-public-keys`)需要一个 " "CSV 文件路径,该文件存储了所有已知节点的公钥。您需要在一个 CSV 文件(:code:`.csv`)中存储所有允许参与联盟的已知节点公钥。" -#: ../../source/how-to-authenticate-supernodes.rst:42 +#: ../../source/how-to-authenticate-supernodes.rst:53 #, fuzzy msgid "" "A valid CSV file storing known node public keys should list the keys in " @@ -5898,19 +6120,19 @@ msgstr "" "存储已知节点公开密钥的有效 CSV 文件应以 OpenSSH " "格式列出密钥,以逗号分隔,不含任何注释。有关示例,请参阅我们的代码示例,其中包含一个包含两个已知节点公钥的 CSV 文件。" -#: ../../source/how-to-authenticate-supernodes.rst:44 +#: ../../source/how-to-authenticate-supernodes.rst:57 #, fuzzy msgid "" -"The second and third flags :code:`--auth-superlink-private-key` and :code" -":`--auth-superlink-public-key` expect paths to the server's private and " -"public keys. For development purposes, you can generate a private and " -"public key pair using :code:`ssh-keygen -t ecdsa -b 384`." +"The second and third flags ``--auth-superlink-private-key`` and ``--auth-" +"superlink-public-key`` expect paths to the server's private and public " +"keys. For development purposes, you can generate a private and public key" +" pair using ``ssh-keygen -t ecdsa -b 384``." 
msgstr "" "第二和第三个标记 :code:`--auth-superlink-private-key` 和 :code:`--auth-superlink-" "public-key` 希望指向服务器私钥和公钥的路径。出于开发目的,您可以使用 :code:`ssh-keygen -t ecdsa -b " "384` 生成一对私钥和公钥。" -#: ../../source/how-to-authenticate-supernodes.rst:47 +#: ../../source/how-to-authenticate-supernodes.rst:64 #, fuzzy msgid "" "In Flower 1.9, there is no support for dynamically removing, editing, or " @@ -5922,41 +6144,41 @@ msgstr "" "在 Flower 1.9 中,超级链接不支持动态删除、编辑或添加已知节点公钥。要更改已知节点集,您需要关闭服务器,编辑 CSV " "文件,然后重新启动服务器。动态更改已知节点集的支持已列入 Flower 1.10(预计发布时间:6 月)的路线图。" -#: ../../source/how-to-authenticate-supernodes.rst:53 +#: ../../source/how-to-authenticate-supernodes.rst:71 #, fuzzy -msgid "Enable node authentication in :code:`SuperNode`" +msgid "Enable node authentication in ``SuperNode``" msgstr "在 :code:`SuperNode` 中启用节点验证" -#: ../../source/how-to-authenticate-supernodes.rst:55 +#: ../../source/how-to-authenticate-supernodes.rst:73 #, fuzzy msgid "" -"Similar to the long-running Flower server (:code:`SuperLink`), you can " -"easily enable node authentication in the long-running Flower client " -"(:code:`SuperNode`). Use the following terminal command to start an " -"authenticated :code:`SuperNode`:" +"Similar to the long-running Flower server (``SuperLink``), you can easily" +" enable node authentication in the long-running Flower client " +"(``SuperNode``). Use the following terminal command to start an " +"authenticated ``SuperNode``:" msgstr "" "与长期运行的 Flower 服务器(:code:`SuperLink`)类似,您也可以在长期运行的 Flower " "客户端(:code:`SuperNode`)中轻松启用节点身份验证。使用以下终端命令启动已验证的 :code:`SuperNode`:" -#: ../../source/how-to-authenticate-supernodes.rst:66 +#: ../../source/how-to-authenticate-supernodes.rst:85 #, fuzzy msgid "" -"The :code:`--auth-supernode-private-key` flag expects a path to the " -"node's private key file and the :code:`--auth-supernode-public-key` flag " -"expects a path to the node's public key file. 
For development purposes, " -"you can generate a private and public key pair using :code:`ssh-keygen -t" -" ecdsa -b 384`." +"The ``--auth-supernode-private-key`` flag expects a path to the node's " +"private key file and the ``--auth-supernode-public-key`` flag expects a " +"path to the node's public key file. For development purposes, you can " +"generate a private and public key pair using ``ssh-keygen -t ecdsa -b " +"384``." msgstr "" ":code:`--auth-supernode-private-key`标志需要节点私钥文件的路径,:code:`-auth-supernode-" "public-key`标志需要节点公钥文件的路径。出于开发目的,可以使用 :code:`ssh-keygen -t ecdsa -b 384` " "生成一对私钥和公钥。" -#: ../../source/how-to-authenticate-supernodes.rst:70 +#: ../../source/how-to-authenticate-supernodes.rst:91 #, fuzzy msgid "Security notice" msgstr "安全通知" -#: ../../source/how-to-authenticate-supernodes.rst:72 +#: ../../source/how-to-authenticate-supernodes.rst:93 #, fuzzy msgid "" "The system's security relies on the credentials of the SuperLink and each" @@ -5970,20 +6192,20 @@ msgstr "" "系统的安全性依赖于超级链接和每个超级节点的凭证。因此,必须保护和安全存储凭证,以避免公钥基础设施 (PKI) " "假冒攻击等安全风险。节点验证机制还涉及人机交互,因此请确保使用可信的通信方法,以安全的方式进行所有通信。" -#: ../../source/how-to-authenticate-supernodes.rst:77 -#: ../../source/how-to-enable-ssl-connections.rst:68 -#: ../../source/how-to-use-built-in-mods.rst:85 +#: ../../source/how-to-authenticate-supernodes.rst:100 +#: ../../source/how-to-enable-ssl-connections.rst:71 +#: ../../source/how-to-use-built-in-mods.rst:95 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 msgid "Conclusion" msgstr "总结" -#: ../../source/how-to-authenticate-supernodes.rst:79 +#: ../../source/how-to-authenticate-supernodes.rst:102 #, fuzzy msgid "" "You should now have learned how to start a long-running Flower server " -"(:code:`SuperLink`) and client (:code:`SuperNode`) with node " -"authentication enabled. You should also know the significance of the " -"private key and store it safely to minimize security risks." 
+"(``SuperLink``) and client (``SuperNode``) with node authentication " +"enabled. You should also know the significance of the private key and " +"store it safely to minimize security risks." msgstr "" "现在,您应该已经学会了如何启动长期运行的 Flower " "服务器(:code:`SuperLink`)和客户端(:code:`SuperNode`)并启用节点身份验证。您还应该知道私钥的重要性,并将其安全存储,以尽量减少安全风险。" @@ -6000,11 +6222,11 @@ msgid "" "the server." msgstr "除了模型参数,Flower 还可以向客户端发送配置值。配置值有多种用途。它们是一种从服务器控制客户端超参数的常用方法。" -#: ../../source/how-to-configure-clients.rst:7 +#: ../../source/how-to-configure-clients.rst:9 msgid "Configuration values" msgstr "配置值" -#: ../../source/how-to-configure-clients.rst:9 +#: ../../source/how-to-configure-clients.rst:11 msgid "" "Configuration values are represented as a dictionary with ``str`` keys " "and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " @@ -6014,7 +6236,7 @@ msgstr "" "配置值以字典的形式表示,字典的键为 ``str``,值的类型为 ``bool``、``bytes``、``double``(64 " "位精度浮点型)、``int``或 ``str`(或不同语言中的等效类型)。下面是一个 Python 配置字典的示例:" -#: ../../source/how-to-configure-clients.rst:20 +#: ../../source/how-to-configure-clients.rst:25 msgid "" "Flower serializes these configuration dictionaries (or *config dict* for " "short) to their ProtoBuf representation, transports them to the client " @@ -6023,7 +6245,7 @@ msgstr "" "Flower 将这些配置字典(简称 *config dict*)序列化为 ProtoBuf 表示形式,使用 gRPC " "将其传输到客户端,然后再反序列化为 Python 字典。" -#: ../../source/how-to-configure-clients.rst:24 +#: ../../source/how-to-configure-clients.rst:31 msgid "" "Currently, there is no support for directly sending collection types " "(e.g., ``Set``, ``List``, ``Map``) as values in configuration " @@ -6034,7 +6256,7 @@ msgstr "" "目前,还不支持在配置字典中直接发送作为值的集合类型(例如,`Set``, `List`, " "`Map``)。有几种变通方法可将集合转换为支持的值类型之一(并在客户端将其转换回),从而将集合作为值发送。" -#: ../../source/how-to-configure-clients.rst:26 +#: ../../source/how-to-configure-clients.rst:36 msgid "" "One can, for example, convert a list of floating-point numbers to a JSON " "string, then send the JSON string using the configuration 
dictionary, and" @@ -6042,23 +6264,24 @@ msgid "" " the client." msgstr "例如,可以将浮点数列表转换为 JSON 字符串,然后使用配置字典发送 JSON 字符串,再在客户端将 JSON 字符串转换回浮点数列表。" -#: ../../source/how-to-configure-clients.rst:30 +#: ../../source/how-to-configure-clients.rst:41 msgid "Configuration through built-in strategies" msgstr "通过内置策略进行配置" -#: ../../source/how-to-configure-clients.rst:32 +#: ../../source/how-to-configure-clients.rst:43 +#, fuzzy msgid "" "The easiest way to send configuration values to clients is to use a " -"built-in strategy like :code:`FedAvg`. Built-in strategies support so-" -"called configuration functions. A configuration function is a function " -"that the built-in strategy calls to get the configuration dictionary for " -"the current round. It then forwards the configuration dictionary to all " -"the clients selected during that round." +"built-in strategy like ``FedAvg``. Built-in strategies support so-called " +"configuration functions. A configuration function is a function that the " +"built-in strategy calls to get the configuration dictionary for the " +"current round. It then forwards the configuration dictionary to all the " +"clients selected during that round." msgstr "" "向客户端发送配置值的最简单方法是使用内置策略,如 " ":code:`FedAvg`。内置策略支持所谓的配置函数。配置函数是内置策略调用的函数,用于获取当前轮的配置字典。然后,它会将配置字典转发给该轮中选择的所有客户端。" -#: ../../source/how-to-configure-clients.rst:34 +#: ../../source/how-to-configure-clients.rst:49 msgid "" "Let's start with a simple example. Imagine we want to send (a) the batch " "size that the client should use, (b) the current global round of " @@ -6066,18 +6289,19 @@ msgid "" "side. 
Our configuration function could look like this:" msgstr "让我们从一个简单的例子开始。想象一下,我们想要发送给客户端(a)应该使用的批次大小,(b)当前联邦学习的全局轮次,以及(c)客户端训练的遍历数。我们的配置函数可以是这样的:" -#: ../../source/how-to-configure-clients.rst:47 +#: ../../source/how-to-configure-clients.rst:65 +#, fuzzy msgid "" "To make the built-in strategies use this function, we can pass it to " "``FedAvg`` during initialization using the parameter " -":code:`on_fit_config_fn`:" +"``on_fit_config_fn``:" msgstr "为了让内置策略使用这个函数,我们可以在初始化时使用参数 :code:`on_fit_config_fn` 将它传递给 ``FedAvg`` :" -#: ../../source/how-to-configure-clients.rst:56 +#: ../../source/how-to-configure-clients.rst:75 msgid "One the client side, we receive the configuration dictionary in ``fit``:" msgstr "在客户端,我们在 ``fit`` 中接收配置字典:" -#: ../../source/how-to-configure-clients.rst:67 +#: ../../source/how-to-configure-clients.rst:86 msgid "" "There is also an `on_evaluate_config_fn` to configure evaluation, which " "works the same way. They are separate functions because one might want to" @@ -6087,7 +6311,7 @@ msgstr "" "还有一个 `on_evaluate_config_fn` 用于配置评估,其工作方式相同。它们是不同的函数,因为可能需要向 `evaluate` " "发送不同的配置值(例如,使用不同的批量大小)。" -#: ../../source/how-to-configure-clients.rst:69 +#: ../../source/how-to-configure-clients.rst:90 msgid "" "The built-in strategies call this function every round (that is, every " "time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " @@ -6100,30 +6324,31 @@ msgstr "" "`Strategy.configure_evaluate` 时)。每轮调用 `on_evaluate_config_fn` " "允许我们在连续几轮中改变配置指令。例如,如果我们想实现一个超参数时间表,以增加后几轮的本地遍历次数,我们可以这样做:" -#: ../../source/how-to-configure-clients.rst:82 -msgid "The :code:`FedAvg` strategy will call this function *every round*." +#: ../../source/how-to-configure-clients.rst:107 +#, fuzzy +msgid "The ``FedAvg`` strategy will call this function *every round*." 
msgstr "代码:`FedAvg`策略*每轮*都会调用该函数。" -#: ../../source/how-to-configure-clients.rst:85 +#: ../../source/how-to-configure-clients.rst:110 msgid "Configuring individual clients" msgstr "配置个别客户端" -#: ../../source/how-to-configure-clients.rst:87 +#: ../../source/how-to-configure-clients.rst:112 msgid "" "In some cases, it is necessary to send different configuration values to " "different clients." msgstr "在某些情况下,有必要向不同的客户端发送不同的配置值。" -#: ../../source/how-to-configure-clients.rst:89 +#: ../../source/how-to-configure-clients.rst:115 #, fuzzy msgid "" "This can be achieved by customizing an existing strategy or by " ":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes :code:`FedAvg`" -" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" -" to the config dict of a *single client* (only the first client in the " -"list, the other clients in this round to not receive this \"special\" " -"config value):" +"strategies>`. Here's a nonsensical example that customizes ``FedAvg`` by " +"adding a custom ``\"hello\": \"world\"`` configuration key/value pair to " +"the config dict of a *single client* (only the first client in the list, " +"the other clients in this round to not receive this \"special\" config " +"value):" msgstr "" "这可以通过定制现有策略或 `从头开始实施一个定制策略 `_" " function. For example:" @@ -6175,49 +6402,50 @@ msgstr "" "`_" " 函数。例如:" -#: ../../source/how-to-configure-logging.rst:53 +#: ../../source/how-to-configure-logging.rst:59 +#, fuzzy msgid "" "With the above, Flower will record the log you see on your terminal to " -":code:`log.txt`. This file will be created in the same directory as were " -"you are running the code from. If we inspect we see the log above is also" -" recorded but prefixing with :code:`identifier` each line:" +"``log.txt``. This file will be created in the same directory as were you " +"are running the code from. 
If we inspect we see the log above is also " +"recorded but prefixing with ``identifier`` each line:" msgstr "" "通过上述操作,Flower 会将您在终端上看到的日志记录到 " ":code:`log.txt`。该文件将创建在运行代码的同一目录下。如果我们检查一下,就会发现上面的日志也被记录了下来,但每一行都以 " ":code:`identifier` 作为前缀:" -#: ../../source/how-to-configure-logging.rst:74 +#: ../../source/how-to-configure-logging.rst:81 msgid "Log your own messages" msgstr "记录自己的信息" -#: ../../source/how-to-configure-logging.rst:76 +#: ../../source/how-to-configure-logging.rst:83 msgid "" "You might expand the information shown by default with the Flower logger " "by adding more messages relevant to your application. You can achieve " "this easily as follows." msgstr "您可以通过添加更多与应用程序相关的信息来扩展 Flower 日志记录器默认显示的信息。您可以通过以下方法轻松实现这一目标。" -#: ../../source/how-to-configure-logging.rst:102 +#: ../../source/how-to-configure-logging.rst:114 msgid "" "In this way your logger will show, in addition to the default messages, " "the ones introduced by the clients as specified above." msgstr "这样,除默认信息外,您的日志记录器还将显示由客户引入的信息,如上文所述。" -#: ../../source/how-to-configure-logging.rst:128 +#: ../../source/how-to-configure-logging.rst:140 msgid "Log to a remote service" msgstr "登录远程服务" -#: ../../source/how-to-configure-logging.rst:130 +#: ../../source/how-to-configure-logging.rst:142 +#, fuzzy msgid "" -"The :code:`fl.common.logger.configure` function, also allows specifying a" -" host to which logs can be pushed (via :code:`POST`) through a native " -"Python :code:`logging.handler.HTTPHandler`. This is a particularly useful" -" feature in :code:`gRPC`-based Federated Learning workloads where " -"otherwise gathering logs from all entities (i.e. the server and the " -"clients) might be cumbersome. Note that in Flower simulation, the server " -"automatically displays all logs. You can still specify a " -":code:`HTTPHandler` should you wish to backup or analyze the logs " -"somewhere else." 
+"The ``fl.common.logger.configure`` function, also allows specifying a " +"host to which logs can be pushed (via ``POST``) through a native Python " +"``logging.handler.HTTPHandler``. This is a particularly useful feature in" +" ``gRPC``-based Federated Learning workloads where otherwise gathering " +"logs from all entities (i.e. the server and the clients) might be " +"cumbersome. Note that in Flower simulation, the server automatically " +"displays all logs. You can still specify a ``HTTPHandler`` should you " +"wish to backup or analyze the logs somewhere else." msgstr "" "此外,:code:`fl.common.logger.configure`函数还允许指定主机,通过本地 Python " ":code:`logging.handler.HTTPHandler`,向该主机推送日志(通过 :code:`POST`)。在基于 " @@ -6232,11 +6460,11 @@ msgstr "启用 SSL 连接" #, fuzzy msgid "" "This guide describes how to a SSL-enabled secure Flower server " -"(:code:`SuperLink`) can be started and how a Flower client " -"(:code:`SuperNode`) can establish a secure connections to it." +"(``SuperLink``) can be started and how a Flower client (``SuperNode``) " +"can establish a secure connections to it." msgstr "本指南介绍如何启动启用 SSL 的安全 Flower 服务器,以及 Flower 客户端如何与其建立安全连接。" -#: ../../source/how-to-enable-ssl-connections.rst:7 +#: ../../source/how-to-enable-ssl-connections.rst:8 msgid "" "A complete code example demonstrating a secure connection can be found " "`here `_ 。" -#: ../../source/how-to-enable-ssl-connections.rst:10 +#: ../../source/how-to-enable-ssl-connections.rst:11 #, fuzzy msgid "" -"The code example comes with a :code:`README.md` file which explains how " -"to start it. Although it is already SSL-enabled, it might be less " +"The code example comes with a ``README.md`` file which explains how to " +"start it. Although it is already SSL-enabled, it might be less " "descriptive on how it does so. Stick to this guide for a deeper " "introduction to the topic." 
msgstr "代码示例附带的 README.md 文件将解释如何启动它。虽然它已经启用了 SSL,但对如何启用可能描述较少。请参考本指南,了解更深入的相关介绍。" @@ -6264,19 +6492,21 @@ msgid "" "Using SSL-enabled connections requires certificates to be passed to the " "server and client. For the purpose of this guide we are going to generate" " self-signed certificates. As this can become quite complex we are going " -"to ask you to run the script in :code:`examples/advanced-" -"tensorflow/certificates/generate.sh` with the following command sequence:" +"to ask you to run the script in ``examples/advanced-" +"tensorflow/certificates/generate.sh`` with the following command " +"sequence:" msgstr "" "使用支持 SSL 的连接需要向服务器和客户端传递证书。在本指南中,我们将生成自签名证书。由于这可能会变得相当复杂,我们将要求你运行 " ":code:`examples/advanced-tensorflow/certificates/generate.sh` 中的脚本" #: ../../source/how-to-enable-ssl-connections.rst:29 +#, fuzzy msgid "" -"This will generate the certificates in :code:`examples/advanced-" -"tensorflow/.cache/certificates`." +"This will generate the certificates in ``examples/advanced-" +"tensorflow/.cache/certificates``." msgstr "这将在 :code:`examples/advanced-tensorflow/.cache/certificates` 中生成证书。" -#: ../../source/how-to-enable-ssl-connections.rst:31 +#: ../../source/how-to-enable-ssl-connections.rst:32 #, fuzzy msgid "" "The approach for generating SSL certificates in the context of this " @@ -6288,19 +6518,19 @@ msgid "" "generated using the scripts mentioned in this guide." 
msgstr "本示例中生成 SSL 证书的方法可作为启发和起点,但不应被视为生产环境的完整方法。有关在生产环境中正确生成证书的问题,请参考其他资料。" -#: ../../source/how-to-enable-ssl-connections.rst:39 +#: ../../source/how-to-enable-ssl-connections.rst:40 #, fuzzy msgid "Server (SuperLink)" msgstr "flower-superlink" -#: ../../source/how-to-enable-ssl-connections.rst:41 +#: ../../source/how-to-enable-ssl-connections.rst:42 #, fuzzy msgid "" "Use the following terminal command to start a sever (SuperLink) that uses" " the previously generated certificates:" msgstr "现在我们将演示如何编写一个客户端,使用之前生成的脚本:" -#: ../../source/how-to-enable-ssl-connections.rst:50 +#: ../../source/how-to-enable-ssl-connections.rst:52 #, fuzzy msgid "" "When providing certificates, the server expects a tuple of three " @@ -6308,28 +6538,28 @@ msgid "" " key." msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" -#: ../../source/how-to-enable-ssl-connections.rst:54 +#: ../../source/how-to-enable-ssl-connections.rst:56 #, fuzzy msgid "Client (SuperNode)" msgstr "客户端状态代码。" -#: ../../source/how-to-enable-ssl-connections.rst:56 +#: ../../source/how-to-enable-ssl-connections.rst:58 #, fuzzy msgid "" "Use the following terminal command to start a client (SuperNode) that " "uses the previously generated certificates:" msgstr "现在我们将演示如何编写一个客户端,使用之前生成的脚本:" -#: ../../source/how-to-enable-ssl-connections.rst:64 +#: ../../source/how-to-enable-ssl-connections.rst:67 #, fuzzy msgid "" -"When setting :code:`root_certificates`, the client expects a file path to" -" PEM-encoded root certificates." +"When setting ``root_certificates``, the client expects a file path to " +"PEM-encoded root certificates." msgstr "" "当设置 :code:`root_certificates` 时,客户端希望 PEM 编码的根证书是字节字符串。我们再次使用 " ":code:`Path` 来简化以字节字符串形式读取证书的过程。" -#: ../../source/how-to-enable-ssl-connections.rst:70 +#: ../../source/how-to-enable-ssl-connections.rst:73 #, fuzzy msgid "" "You should now have learned how to generate self-signed certificates " @@ -6337,21 +6567,21 @@ msgid "" "establish a secure connection to it." 
msgstr "现在,你应该已经学会了如何使用给定的脚本生成自签名证书、启动启用 SSL 的服务器并让客户端与其建立安全连接。" -#: ../../source/how-to-enable-ssl-connections.rst:75 +#: ../../source/how-to-enable-ssl-connections.rst:78 msgid "Additional resources" msgstr "补充资源" -#: ../../source/how-to-enable-ssl-connections.rst:77 +#: ../../source/how-to-enable-ssl-connections.rst:80 msgid "" "These additional sources might be relevant if you would like to dive " "deeper into the topic of certificates:" msgstr "如果您想更深入地了解证书主题,这些额外的资料来源可能有帮助:" -#: ../../source/how-to-enable-ssl-connections.rst:79 +#: ../../source/how-to-enable-ssl-connections.rst:83 msgid "`Let's Encrypt `_" msgstr "`让我们加密 `_" -#: ../../source/how-to-enable-ssl-connections.rst:80 +#: ../../source/how-to-enable-ssl-connections.rst:84 msgid "`certbot `_" msgstr "`certbot `_" @@ -6372,13 +6602,15 @@ msgstr "" " 提供了一些内置策略,这些策略基于下文所述的相同 API。" #: ../../source/how-to-implement-strategies.rst:11 -msgid "The :code:`Strategy` abstraction" +#, fuzzy +msgid "The ``Strategy`` abstraction" msgstr ":code:`策略 ` 抽象类" #: ../../source/how-to-implement-strategies.rst:13 +#, fuzzy msgid "" "All strategy implementation are derived from the abstract base class " -":code:`flwr.server.strategy.Strategy`, both built-in implementations and " +"``flwr.server.strategy.Strategy``, both built-in implementations and " "third party implementations. This means that custom strategy " "implementations have the exact same capabilities at their disposal as " "built-in ones." 
@@ -6392,58 +6624,63 @@ msgid "" "implemented:" msgstr "策略抽象定义了一些需要实现的抽象方法:" -#: ../../source/how-to-implement-strategies.rst:75 +#: ../../source/how-to-implement-strategies.rst:67 +#, fuzzy msgid "" -"Creating a new strategy means implementing a new :code:`class` (derived " -"from the abstract base class :code:`Strategy`) that implements for the " -"previously shown abstract methods:" +"Creating a new strategy means implementing a new ``class`` (derived from " +"the abstract base class ``Strategy``) that implements for the previously " +"shown abstract methods:" msgstr "创建一个新策略意味着要实现一个新的 :code:`class`(从抽象基类 :code:`Strategy` 派生),该类要实现前面显示的抽象方法:" -#: ../../source/how-to-implement-strategies.rst:100 +#: ../../source/how-to-implement-strategies.rst:97 msgid "The Flower server calls these methods in the following order:" msgstr "Flower 服务器按以下顺序调用这些方法:" -#: ../../source/how-to-implement-strategies.rst:177 +#: ../../source/how-to-implement-strategies.rst:174 msgid "The following sections describe each of those methods in more detail." msgstr "下文将详细介绍每种方法。" -#: ../../source/how-to-implement-strategies.rst:180 -msgid "The :code:`initialize_parameters` method" +#: ../../source/how-to-implement-strategies.rst:177 +#, fuzzy +msgid "The ``initialize_parameters`` method" msgstr ":code:`初始化参数` 方法" -#: ../../source/how-to-implement-strategies.rst:182 +#: ../../source/how-to-implement-strategies.rst:179 +#, fuzzy msgid "" -":code:`initialize_parameters` is called only once, at the very beginning " -"of an execution. It is responsible for providing the initial global model" -" parameters in a serialized form (i.e., as a :code:`Parameters` object)." +"``initialize_parameters`` is called only once, at the very beginning of " +"an execution. It is responsible for providing the initial global model " +"parameters in a serialized form (i.e., as a ``Parameters`` object)." 
msgstr "" ":code:`initialize_parameters` 只调用一次,即在执行开始时。它负责以序列化形式(即 " ":code:`Parameters` 对象)提供初始全局模型参数。" -#: ../../source/how-to-implement-strategies.rst:184 +#: ../../source/how-to-implement-strategies.rst:183 +#, fuzzy msgid "" "Built-in strategies return user-provided initial parameters. The " "following example shows how initial parameters can be passed to " -":code:`FedAvg`:" +"``FedAvg``:" msgstr "内置策略会返回用户提供的初始参数。下面的示例展示了如何将初始参数传递给 :code:`FedAvg`:" #: ../../source/how-to-implement-strategies.rst:209 +#, fuzzy msgid "" -"The Flower server will call :code:`initialize_parameters`, which either " -"returns the parameters that were passed to :code:`initial_parameters`, or" -" :code:`None`. If no parameters are returned from " -":code:`initialize_parameters` (i.e., :code:`None`), the server will " -"randomly select one client and ask it to provide its parameters. This is " -"a convenience feature and not recommended in practice, but it can be " -"useful for prototyping. In practice, it is recommended to always use " -"server-side parameter initialization." +"The Flower server will call ``initialize_parameters``, which either " +"returns the parameters that were passed to ``initial_parameters``, or " +"``None``. If no parameters are returned from ``initialize_parameters`` " +"(i.e., ``None``), the server will randomly select one client and ask it " +"to provide its parameters. This is a convenience feature and not " +"recommended in practice, but it can be useful for prototyping. In " +"practice, it is recommended to always use server-side parameter " +"initialization." 
msgstr "" "Flower 服务器将调用 :code:`initialize_parameters`,返回传给 " ":code:`initial_parameters` 的参数或 :code:`None`。如果 " ":code:`initialize_parameters` 没有返回任何参数(即 " ":code:`None`),服务器将随机选择一个客户端并要求其提供参数。这只是一个便捷的功能,在实际应用中并不推荐使用,但在原型开发中可能很有用。在实践中,建议始终使用服务器端参数初始化。" -#: ../../source/how-to-implement-strategies.rst:213 +#: ../../source/how-to-implement-strategies.rst:218 msgid "" "Server-side parameter initialization is a powerful mechanism. It can be " "used, for example, to resume training from a previously saved checkpoint." @@ -6452,210 +6689,228 @@ msgid "" " learning." msgstr "服务器端参数初始化是一种强大的机制。例如,它可以用来从先前保存的检查点恢复训练。它也是实现混合方法所需的基本能力,例如,使用联邦学习对预先训练好的模型进行微调。" -#: ../../source/how-to-implement-strategies.rst:216 -msgid "The :code:`configure_fit` method" +#: ../../source/how-to-implement-strategies.rst:224 +#, fuzzy +msgid "The ``configure_fit`` method" msgstr ":code:`configure_fit`方法" -#: ../../source/how-to-implement-strategies.rst:218 +#: ../../source/how-to-implement-strategies.rst:226 +#, fuzzy msgid "" -":code:`configure_fit` is responsible for configuring the upcoming round " -"of training. What does *configure* mean in this context? Configuring a " -"round means selecting clients and deciding what instructions to send to " -"these clients. The signature of :code:`configure_fit` makes this clear:" +"``configure_fit`` is responsible for configuring the upcoming round of " +"training. What does *configure* mean in this context? Configuring a round" +" means selecting clients and deciding what instructions to send to these " +"clients. The signature of ``configure_fit`` makes this clear:" msgstr "" ":code:`configure_fit` " "负责配置即将开始的一轮训练。*配置*在这里是什么意思?配置一轮训练意味着选择客户并决定向这些客户发送什么指令。:code:`configure_fit`" " 说明了这一点:" -#: ../../source/how-to-implement-strategies.rst:231 +#: ../../source/how-to-implement-strategies.rst:239 +#, fuzzy msgid "" "The return value is a list of tuples, each representing the instructions " "that will be sent to a particular client. 
Strategy implementations " -"usually perform the following steps in :code:`configure_fit`:" +"usually perform the following steps in ``configure_fit``:" msgstr "返回值是一个元组列表,每个元组代表将发送到特定客户端的指令。策略实现通常在 :code:`configure_fit` 中执行以下步骤:" -#: ../../source/how-to-implement-strategies.rst:233 -#: ../../source/how-to-implement-strategies.rst:280 +#: ../../source/how-to-implement-strategies.rst:243 +#: ../../source/how-to-implement-strategies.rst:307 +#, fuzzy msgid "" -"Use the :code:`client_manager` to randomly sample all (or a subset of) " -"available clients (each represented as a :code:`ClientProxy` object)" +"Use the ``client_manager`` to randomly sample all (or a subset of) " +"available clients (each represented as a ``ClientProxy`` object)" msgstr "" "使用 :code:`client_manager` 随机抽样所有(或部分)可用客户端(每个客户端都表示为 :code:`ClientProxy` " "对象)" -#: ../../source/how-to-implement-strategies.rst:234 +#: ../../source/how-to-implement-strategies.rst:245 +#, fuzzy msgid "" -"Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " -"current global model :code:`parameters` and :code:`config` dict" +"Pair each ``ClientProxy`` with the same ``FitIns`` holding the current " +"global model ``parameters`` and ``config`` dict" msgstr "" "将每个 :code:`ClientProxy` 与持有当前全局模型 :code:`parameters` 和 :code:`config` " "dict 的 :code:`FitIns` 配对" -#: ../../source/how-to-implement-strategies.rst:236 +#: ../../source/how-to-implement-strategies.rst:248 #, fuzzy msgid "" -"More sophisticated implementations can use :code:`configure_fit` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_fit`." +"More sophisticated implementations can use ``configure_fit`` to implement" +" custom client selection logic. A client will only participate in a round" +" if the corresponding ``ClientProxy`` is included in the list returned " +"from ``configure_fit``." 
msgstr "" "更复杂的实现可以使用 :code:`configure_fit` 来实现自定义的客户端选择逻辑。只有当相应的 " ":code:`ClientProxy` 包含在 :code:`configure_fit` 返回的列表中时,客户端才会参与进来。" -#: ../../source/how-to-implement-strategies.rst:240 +#: ../../source/how-to-implement-strategies.rst:254 +#, fuzzy msgid "" "The structure of this return value provides a lot of flexibility to the " "user. Since instructions are defined on a per-client basis, different " "instructions can be sent to each client. This enables custom strategies " "to train, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." +"different hyperparameters on different clients (via the ``config`` dict)." msgstr "" "该返回值的结构为用户提供了很大的灵活性。由于指令是按客户端定义的,因此可以向每个客户端发送不同的指令。这使得自定义策略成为可能,例如在不同的客户端上训练不同的模型,或在不同的客户端上使用不同的超参数(通过" " :code:`config` dict)。" -#: ../../source/how-to-implement-strategies.rst:243 -msgid "The :code:`aggregate_fit` method" +#: ../../source/how-to-implement-strategies.rst:261 +#, fuzzy +msgid "The ``aggregate_fit`` method" msgstr ":code:`aggregate_fit` 方法" -#: ../../source/how-to-implement-strategies.rst:245 +#: ../../source/how-to-implement-strategies.rst:263 +#, fuzzy msgid "" -":code:`aggregate_fit` is responsible for aggregating the results returned" -" by the clients that were selected and asked to train in " -":code:`configure_fit`." +"``aggregate_fit`` is responsible for aggregating the results returned by " +"the clients that were selected and asked to train in ``configure_fit``." msgstr ":code:`aggregate_fit` 负责汇总在 :code:`configure_fit` 中选择并要求训练的客户端所返回的结果。" -#: ../../source/how-to-implement-strategies.rst:258 +#: ../../source/how-to-implement-strategies.rst:277 +#, fuzzy msgid "" "Of course, failures can happen, so there is no guarantee that the server " "will get results from all the clients it sent instructions to (via " -":code:`configure_fit`). 
:code:`aggregate_fit` therefore receives a list " -"of :code:`results`, but also a list of :code:`failures`." +"``configure_fit``). ``aggregate_fit`` therefore receives a list of " +"``results``, but also a list of ``failures``." msgstr "" "当然,失败是有可能发生的,因此无法保证服务器会从它发送指令(通过 :code:`configure_fit`)的所有客户端获得结果。因此 " ":code:`aggregate_fit` 会收到 :code:`results` 的列表,但也会收到 :code:`failures` 的列表。" -#: ../../source/how-to-implement-strategies.rst:260 +#: ../../source/how-to-implement-strategies.rst:282 +#, fuzzy msgid "" -":code:`aggregate_fit` returns an optional :code:`Parameters` object and a" -" dictionary of aggregated metrics. The :code:`Parameters` return value is" -" optional because :code:`aggregate_fit` might decide that the results " -"provided are not sufficient for aggregation (e.g., too many failures)." +"``aggregate_fit`` returns an optional ``Parameters`` object and a " +"dictionary of aggregated metrics. The ``Parameters`` return value is " +"optional because ``aggregate_fit`` might decide that the results provided" +" are not sufficient for aggregation (e.g., too many failures)." msgstr "" ":code:`aggregate_fit` 返回一个可选的 :code:`Parameters` " "对象和一个聚合度量的字典。:code:`Parameters` 返回值是可选的,因为 :code:`aggregate_fit` " "可能会认为所提供的结果不足以进行聚合(例如,失败次数过多)。" -#: ../../source/how-to-implement-strategies.rst:263 -msgid "The :code:`configure_evaluate` method" +#: ../../source/how-to-implement-strategies.rst:288 +#, fuzzy +msgid "The ``configure_evaluate`` method" msgstr ":code:`configure_evaluate`方法" -#: ../../source/how-to-implement-strategies.rst:265 +#: ../../source/how-to-implement-strategies.rst:290 +#, fuzzy msgid "" -":code:`configure_evaluate` is responsible for configuring the upcoming " -"round of evaluation. What does *configure* mean in this context? " -"Configuring a round means selecting clients and deciding what " -"instructions to send to these clients. 
The signature of " -":code:`configure_evaluate` makes this clear:" +"``configure_evaluate`` is responsible for configuring the upcoming round " +"of evaluation. What does *configure* mean in this context? Configuring a " +"round means selecting clients and deciding what instructions to send to " +"these clients. The signature of ``configure_evaluate`` makes this clear:" msgstr "" ":code:`configure_evaluate` " "负责配置下一轮评估。*配置*在这里是什么意思?配置一轮评估意味着选择客户端并决定向这些客户端发送什么指令。:code:`configure_evaluate`" " 说明了这一点:" -#: ../../source/how-to-implement-strategies.rst:278 +#: ../../source/how-to-implement-strategies.rst:303 +#, fuzzy msgid "" "The return value is a list of tuples, each representing the instructions " "that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_evaluate`:" +"usually perform the following steps in ``configure_evaluate``:" msgstr "返回值是一个元组列表,每个元组代表将发送到特定客户端的指令。策略实现通常在 :code:`configure_evaluate` 中执行以下步骤:" -#: ../../source/how-to-implement-strategies.rst:281 +#: ../../source/how-to-implement-strategies.rst:309 +#, fuzzy msgid "" -"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding " -"the current global model :code:`parameters` and :code:`config` dict" +"Pair each ``ClientProxy`` with the same ``EvaluateIns`` holding the " +"current global model ``parameters`` and ``config`` dict" msgstr "" "将每个 :code:`ClientProxy` 与持有当前全局模型 :code:`parameters` 和 :code:`config` " "dict 的 :code:`EvaluateIns` 配对" -#: ../../source/how-to-implement-strategies.rst:283 +#: ../../source/how-to-implement-strategies.rst:312 #, fuzzy msgid "" -"More sophisticated implementations can use :code:`configure_evaluate` to " +"More sophisticated implementations can use ``configure_evaluate`` to " "implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_evaluate`." 
+"in a round if the corresponding ``ClientProxy`` is included in the list " +"returned from ``configure_evaluate``." msgstr "" "更复杂的实现可以使用 :code:`configure_evaluate` 来实现自定义的客户端选择逻辑。只有当相应的 " ":code:`ClientProxy` 包含在 :code:`configure_evaluate` 返回的列表中时,客户端才会参与进来。" -#: ../../source/how-to-implement-strategies.rst:287 +#: ../../source/how-to-implement-strategies.rst:318 +#, fuzzy msgid "" "The structure of this return value provides a lot of flexibility to the " "user. Since instructions are defined on a per-client basis, different " "instructions can be sent to each client. This enables custom strategies " "to evaluate, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." +"different hyperparameters on different clients (via the ``config`` dict)." msgstr "" "该返回值的结构为用户提供了很大的灵活性。由于指令是按客户端定义的,因此可以向每个客户端发送不同的指令。这使得自定义策略可以在不同客户端上评估不同的模型,或在不同客户端上使用不同的超参数(通过" " :code:`config` dict)。" -#: ../../source/how-to-implement-strategies.rst:291 -msgid "The :code:`aggregate_evaluate` method" +#: ../../source/how-to-implement-strategies.rst:325 +#, fuzzy +msgid "The ``aggregate_evaluate`` method" msgstr ":code:`aggregate_evaluate` 方法" -#: ../../source/how-to-implement-strategies.rst:293 +#: ../../source/how-to-implement-strategies.rst:327 +#, fuzzy msgid "" -":code:`aggregate_evaluate` is responsible for aggregating the results " +"``aggregate_evaluate`` is responsible for aggregating the results " "returned by the clients that were selected and asked to evaluate in " -":code:`configure_evaluate`." +"``configure_evaluate``." 
msgstr "" ":code:`aggregate_evaluate` 负责汇总在 :code:`configure_evaluate` " "中选择并要求评估的客户端返回的结果。" -#: ../../source/how-to-implement-strategies.rst:306 +#: ../../source/how-to-implement-strategies.rst:341 +#, fuzzy msgid "" "Of course, failures can happen, so there is no guarantee that the server " "will get results from all the clients it sent instructions to (via " -":code:`configure_evaluate`). :code:`aggregate_evaluate` therefore " -"receives a list of :code:`results`, but also a list of :code:`failures`." +"``configure_evaluate``). ``aggregate_evaluate`` therefore receives a list" +" of ``results``, but also a list of ``failures``." msgstr "" "当然,失败是有可能发生的,因此无法保证服务器会从它发送指令(通过 " ":code:`configure_evaluate`)的所有客户端获得结果。因此, :code:`aggregate_evaluate` 会接收 " ":code:`results` 的列表,但也会接收 :code:`failures` 的列表。" -#: ../../source/how-to-implement-strategies.rst:308 +#: ../../source/how-to-implement-strategies.rst:346 +#, fuzzy msgid "" -":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a" -" dictionary of aggregated metrics. The :code:`float` return value is " -"optional because :code:`aggregate_evaluate` might decide that the results" -" provided are not sufficient for aggregation (e.g., too many failures)." +"``aggregate_evaluate`` returns an optional ``float`` (loss) and a " +"dictionary of aggregated metrics. The ``float`` return value is optional " +"because ``aggregate_evaluate`` might decide that the results provided are" +" not sufficient for aggregation (e.g., too many failures)." 
msgstr "" ":code:`aggregate_evaluate` 返回一个可选的 " ":code:`float`(损失值)和一个聚合指标字典。:code:`float` 返回值是可选的,因为 " ":code:`aggregate_evaluate` 可能会认为所提供的结果不足以进行聚合(例如,失败次数过多)。" -#: ../../source/how-to-implement-strategies.rst:311 -msgid "The :code:`evaluate` method" +#: ../../source/how-to-implement-strategies.rst:352 +#, fuzzy +msgid "The ``evaluate`` method" msgstr ":code:`evaluate`方法" -#: ../../source/how-to-implement-strategies.rst:313 +#: ../../source/how-to-implement-strategies.rst:354 +#, fuzzy msgid "" -":code:`evaluate` is responsible for evaluating model parameters on the " -"server-side. Having :code:`evaluate` in addition to " -":code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies " -"to perform both servers-side and client-side (federated) evaluation." +"``evaluate`` is responsible for evaluating model parameters on the " +"server-side. Having ``evaluate`` in addition to " +"``configure_evaluate``/``aggregate_evaluate`` enables strategies to " +"perform both servers-side and client-side (federated) evaluation." msgstr "" ":code:`evaluate` 负责在服务器端评估模型参数。除了 " ":code:`configure_evaluate`/:code:`aggregate_evaluate` 之外,:code:`evaluate`" " 可以使策略同时执行服务器端和客户端(联邦)评估。" -#: ../../source/how-to-implement-strategies.rst:323 +#: ../../source/how-to-implement-strategies.rst:364 +#, fuzzy msgid "" "The return value is again optional because the strategy might not need to" " implement server-side evaluation or because the user-defined " -":code:`evaluate` method might not complete successfully (e.g., it might " -"fail to load the server-side evaluation data)." +"``evaluate`` method might not complete successfully (e.g., it might fail " +"to load the server-side evaluation data)." 
msgstr "" "返回值也是可选的,因为策略可能不需要执行服务器端评估,或者因为用户定义的 :code:`evaluate` " "方法可能无法成功完成(例如,它可能无法加载服务器端评估数据)。" @@ -6664,66 +6919,66 @@ msgstr "" msgid "Install Flower" msgstr "安装Flower" -#: ../../source/how-to-install-flower.rst:6 +#: ../../source/how-to-install-flower.rst:5 msgid "Python version" msgstr "Python 版本" -#: ../../source/how-to-install-flower.rst:12 +#: ../../source/how-to-install-flower.rst:11 msgid "Install stable release" msgstr "安装稳定版" -#: ../../source/how-to-install-flower.rst:15 -#: ../../source/how-to-upgrade-to-flower-next.rst:46 +#: ../../source/how-to-install-flower.rst:14 +#: ../../source/how-to-upgrade-to-flower-next.rst:66 #, fuzzy msgid "Using pip" msgstr "使用 pip" -#: ../../source/how-to-install-flower.rst:17 -msgid "" -"Stable releases are available on `PyPI " -"`_::" +#: ../../source/how-to-install-flower.rst:16 +#, fuzzy +msgid "Stable releases are available on `PyPI `_:" msgstr "稳定版本可在 `PyPI `_::" -#: ../../source/how-to-install-flower.rst:21 +#: ../../source/how-to-install-flower.rst:22 +#, fuzzy msgid "" "For simulations that use the Virtual Client Engine, ``flwr`` should be " -"installed with the ``simulation`` extra::" +"installed with the ``simulation`` extra:" msgstr "对于使用虚拟客户端引擎的模拟,`flwr`` 应与`simulation`` 一起安装:" -#: ../../source/how-to-install-flower.rst:27 +#: ../../source/how-to-install-flower.rst:30 #, fuzzy msgid "Using conda (or mamba)" msgstr "使用 conda(或 mamba)" -#: ../../source/how-to-install-flower.rst:29 +#: ../../source/how-to-install-flower.rst:32 #, fuzzy msgid "Flower can also be installed from the ``conda-forge`` channel." 
msgstr "Flower 也可以从 ``conda-forge`` 频道安装。" -#: ../../source/how-to-install-flower.rst:31 +#: ../../source/how-to-install-flower.rst:34 #, fuzzy msgid "" "If you have not added ``conda-forge`` to your channels, you will first " -"need to run the following::" +"need to run the following:" msgstr "如果您尚未在频道中添加 ``conda-forge``,则首先需要运行以下程序::" -#: ../../source/how-to-install-flower.rst:36 +#: ../../source/how-to-install-flower.rst:42 #, fuzzy msgid "" "Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " -"installed with ``conda``::" +"installed with ``conda``:" msgstr "一旦启用了 ``conda-forge`` 频道,就可以使用 ``conda``: 安装 ``flwr``:" -#: ../../source/how-to-install-flower.rst:40 +#: ../../source/how-to-install-flower.rst:49 #, fuzzy -msgid "or with ``mamba``::" +msgid "or with ``mamba``:" msgstr "或用 ``mamba`` ::" -#: ../../source/how-to-install-flower.rst:46 +#: ../../source/how-to-install-flower.rst:56 msgid "Verify installation" msgstr "验证安装" -#: ../../source/how-to-install-flower.rst:48 +#: ../../source/how-to-install-flower.rst:58 #, fuzzy msgid "" "The following command can be used to verify if Flower was successfully " @@ -6731,53 +6986,57 @@ msgid "" " the command line:" msgstr "可以使用以下命令来验证 Flower 是否安装成功。如果一切正常,它将在命令行中打印 Flower 的版本::" -#: ../../source/how-to-install-flower.rst:58 +#: ../../source/how-to-install-flower.rst:68 msgid "Advanced installation options" msgstr "高级安装选项" -#: ../../source/how-to-install-flower.rst:61 +#: ../../source/how-to-install-flower.rst:71 #, fuzzy msgid "Install via Docker" msgstr "安装Flower" -#: ../../source/how-to-install-flower.rst:63 +#: ../../source/how-to-install-flower.rst:73 #, fuzzy msgid ":doc:`Run Flower using Docker `" msgstr "" "`TensorFlow快速入门 (教程) `_" -#: ../../source/how-to-install-flower.rst:66 +#: ../../source/how-to-install-flower.rst:76 msgid "Install pre-release" msgstr "安装预发布版本" -#: ../../source/how-to-install-flower.rst:68 +#: ../../source/how-to-install-flower.rst:78 +#, fuzzy msgid "" "New (possibly 
unstable) versions of Flower are sometimes available as " "pre-release versions (alpha, beta, release candidate) before the stable " -"release happens::" +"release happens:" msgstr "在稳定版发布之前,Flower 的新版本(可能是不稳定版)有时会作为预发布版本(alpha、beta、候选发布版本)提供::" -#: ../../source/how-to-install-flower.rst:72 +#: ../../source/how-to-install-flower.rst:85 +#, fuzzy msgid "" "For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" -" should be installed with the ``simulation`` extra::" +" should be installed with the ``simulation`` extra:" msgstr "对于使用虚拟客户端引擎的模拟,`flwr``预发行版应与`simulation``一起安装:" -#: ../../source/how-to-install-flower.rst:77 +#: ../../source/how-to-install-flower.rst:93 msgid "Install nightly release" msgstr "安装隔夜版本" -#: ../../source/how-to-install-flower.rst:79 +#: ../../source/how-to-install-flower.rst:95 +#, fuzzy msgid "" "The latest (potentially unstable) changes in Flower are available as " -"nightly releases::" +"nightly releases:" msgstr "Flower 中最新(可能不稳定)的更改以隔夜发布的形式提供::" -#: ../../source/how-to-install-flower.rst:83 +#: ../../source/how-to-install-flower.rst:101 +#, fuzzy msgid "" "For simulations that use the Virtual Client Engine, ``flwr-nightly`` " -"should be installed with the ``simulation`` extra::" +"should be installed with the ``simulation`` extra:" msgstr "对于使用虚拟客户端引擎的模拟,`flwr-nightly`应与`simulation`一起安装:" #: ../../source/how-to-monitor-simulation.rst:2 @@ -6795,17 +7054,17 @@ msgstr "" "Flower 允许您在运行模拟时监控系统资源。此外,Flower " "仿真引擎功能强大,能让您决定如何按客户端方式分配资源并限制总使用量。从资源消耗中获得的观察可以帮助您做出更明智的决策,并加快执行时间。" -#: ../../source/how-to-monitor-simulation.rst:6 +#: ../../source/how-to-monitor-simulation.rst:9 msgid "" "The specific instructions assume you are using macOS and have the " "`Homebrew `_ package manager installed." 
msgstr "具体说明假定你使用的是 macOS,并且安装了 `Homebrew `_ 软件包管理器。" -#: ../../source/how-to-monitor-simulation.rst:10 +#: ../../source/how-to-monitor-simulation.rst:13 msgid "Downloads" msgstr "下载" -#: ../../source/how-to-monitor-simulation.rst:16 +#: ../../source/how-to-monitor-simulation.rst:19 msgid "" "`Prometheus `_ is used for data collection, while" " `Grafana `_ will enable you to visualize the " @@ -6816,89 +7075,90 @@ msgstr "" "`_ 则能让你将收集到的数据可视化。它们都与 Flower 在引擎下使用的 `Ray " "`_ 紧密集成。" -#: ../../source/how-to-monitor-simulation.rst:18 +#: ../../source/how-to-monitor-simulation.rst:23 msgid "" "Overwrite the configuration files (depending on your device, it might be " "installed on a different path)." msgstr "重写配置文件(根据设备的不同,可能安装在不同的路径上)。" -#: ../../source/how-to-monitor-simulation.rst:20 +#: ../../source/how-to-monitor-simulation.rst:26 msgid "If you are on an M1 Mac, it should be:" msgstr "如果你使用的是 M1 Mac,应该是这样:" -#: ../../source/how-to-monitor-simulation.rst:27 +#: ../../source/how-to-monitor-simulation.rst:33 msgid "On the previous generation Intel Mac devices, it should be:" msgstr "在上一代英特尔 Mac 设备上,应该是这样:" -#: ../../source/how-to-monitor-simulation.rst:34 +#: ../../source/how-to-monitor-simulation.rst:40 msgid "" "Open the respective configuration files and change them. Depending on " "your device, use one of the two following commands:" msgstr "打开相应的配置文件并修改它们。根据设备情况,使用以下两个命令之一:" -#: ../../source/how-to-monitor-simulation.rst:44 +#: ../../source/how-to-monitor-simulation.rst:51 msgid "" "and then delete all the text in the file and paste a new Prometheus " "config you see below. You may adjust the time intervals to your " "requirements:" msgstr "然后删除文件中的所有文本,粘贴一个新的 Prometheus 配置文件,如下所示。您可以根据需要调整时间间隔:" -#: ../../source/how-to-monitor-simulation.rst:59 +#: ../../source/how-to-monitor-simulation.rst:67 msgid "" "Now after you have edited the Prometheus configuration, do the same with " "the Grafana configuration files. 
Open those using one of the following " "commands as before:" msgstr "编辑完 Prometheus 配置后,请对 Grafana 配置文件执行同样的操作。与之前一样,使用以下命令之一打开这些文件:" -#: ../../source/how-to-monitor-simulation.rst:69 +#: ../../source/how-to-monitor-simulation.rst:78 msgid "" "Your terminal editor should open and allow you to apply the following " "configuration as before." msgstr "您的终端编辑器应该会打开,并允许您像之前一样应用以下配置。" -#: ../../source/how-to-monitor-simulation.rst:84 +#: ../../source/how-to-monitor-simulation.rst:94 msgid "" "Congratulations, you just downloaded all the necessary software needed " "for metrics tracking. Now, let’s start it." msgstr "恭喜您,您刚刚下载了指标跟踪所需的所有软件。现在,让我们开始吧。" -#: ../../source/how-to-monitor-simulation.rst:88 +#: ../../source/how-to-monitor-simulation.rst:98 msgid "Tracking metrics" msgstr "跟踪指标" -#: ../../source/how-to-monitor-simulation.rst:90 +#: ../../source/how-to-monitor-simulation.rst:100 msgid "" "Before running your Flower simulation, you have to start the monitoring " "tools you have just installed and configured." msgstr "在运行 Flower 模拟之前,您必须启动刚刚安装和配置的监控工具。" -#: ../../source/how-to-monitor-simulation.rst:97 +#: ../../source/how-to-monitor-simulation.rst:108 msgid "" "Please include the following argument in your Python code when starting a" " simulation." msgstr "开始模拟时,请在 Python 代码中加入以下参数。" -#: ../../source/how-to-monitor-simulation.rst:108 +#: ../../source/how-to-monitor-simulation.rst:119 msgid "Now, you are ready to start your workload." msgstr "现在,您可以开始工作了。" -#: ../../source/how-to-monitor-simulation.rst:110 +#: ../../source/how-to-monitor-simulation.rst:121 msgid "" "Shortly after the simulation starts, you should see the following logs in" " your terminal:" msgstr "模拟启动后不久,您就会在终端中看到以下日志:" -#: ../../source/how-to-monitor-simulation.rst:117 -msgid "You can look at everything at ``_ ." +#: ../../source/how-to-monitor-simulation.rst:127 +#, fuzzy +msgid "You can look at everything at http://127.0.0.1:8265 ." 
msgstr "您可以在 ``_ 查看所有内容。" -#: ../../source/how-to-monitor-simulation.rst:119 +#: ../../source/how-to-monitor-simulation.rst:129 msgid "" "It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" " lowest option)." msgstr "这是一个 Ray Dashboard。您可以导航到 \"度量标准\"(左侧面板,最低选项)。" -#: ../../source/how-to-monitor-simulation.rst:121 +#: ../../source/how-to-monitor-simulation.rst:132 msgid "" "Or alternatively, you can just see them in Grafana by clicking on the " "right-up corner, “View in Grafana”. Please note that the Ray dashboard is" @@ -6910,24 +7170,25 @@ msgstr "" "仪表盘只能在模拟期间访问。模拟结束后,您只能使用 Grafana 浏览指标。您可以访问 ``http://localhost:3000/``启动 " "Grafana。" -#: ../../source/how-to-monitor-simulation.rst:123 +#: ../../source/how-to-monitor-simulation.rst:137 +#, fuzzy msgid "" "After you finish the visualization, stop Prometheus and Grafana. This is " -"important as they will otherwise block, for example port :code:`3000` on " -"your machine as long as they are running." +"important as they will otherwise block, for example port ``3000`` on your" +" machine as long as they are running." msgstr "完成可视化后,请停止 Prometheus 和 Grafana。这一点很重要,否则只要它们在运行,就会阻塞机器上的端口 :code:`3000`。" -#: ../../source/how-to-monitor-simulation.rst:132 +#: ../../source/how-to-monitor-simulation.rst:147 msgid "Resource allocation" msgstr "资源分配" -#: ../../source/how-to-monitor-simulation.rst:134 +#: ../../source/how-to-monitor-simulation.rst:149 msgid "" "You must understand how the Ray library works to efficiently allocate " "system resources to simulation clients on your own." 
msgstr "您必须了解 Ray 库是如何工作的,才能有效地为自己的仿真客户端分配系统资源。" -#: ../../source/how-to-monitor-simulation.rst:136 +#: ../../source/how-to-monitor-simulation.rst:152 msgid "" "Initially, the simulation (which Ray handles under the hood) starts by " "default with all the available resources on the system, which it shares " @@ -6939,94 +7200,96 @@ msgstr "" "最初,模拟(由 Ray " "在引擎下处理)默认使用系统上的所有可用资源启动,并在客户端之间共享。但这并不意味着它会将资源平均分配给所有客户端,也不意味着模型训练会在所有客户端同时进行。您将在本博客的后半部分了解到更多相关信息。您可以运行以下命令检查系统资源:" -#: ../../source/how-to-monitor-simulation.rst:143 +#: ../../source/how-to-monitor-simulation.rst:164 msgid "In Google Colab, the result you see might be similar to this:" msgstr "在 Google Colab 中,您看到的结果可能与此类似:" -#: ../../source/how-to-monitor-simulation.rst:155 +#: ../../source/how-to-monitor-simulation.rst:175 msgid "" "However, you can overwrite the defaults. When starting a simulation, do " "the following (you don't need to overwrite all of them):" msgstr "不过,您可以覆盖默认值。开始模拟时,请执行以下操作(不必全部覆盖):" -#: ../../source/how-to-monitor-simulation.rst:175 +#: ../../source/how-to-monitor-simulation.rst:195 msgid "Let’s also specify the resource for a single client." msgstr "我们还可以为单个客户指定资源。" -#: ../../source/how-to-monitor-simulation.rst:205 +#: ../../source/how-to-monitor-simulation.rst:225 msgid "" "Now comes the crucial part. Ray will start a new client only when it has " "all the required resources (such that they run in parallel) when the " "resources allow." msgstr "现在到了关键部分。只有在资源允许的情况下,Ray 才会在拥有所有所需资源(如并行运行)时启动新客户端。" -#: ../../source/how-to-monitor-simulation.rst:207 +#: ../../source/how-to-monitor-simulation.rst:228 +#, fuzzy msgid "" "In the example above, only one client will be run, so your clients won't " -"run concurrently. Setting :code:`client_num_gpus = 0.5` would allow " -"running two clients and therefore enable them to run concurrently. Be " -"careful not to require more resources than available. 
If you specified " -":code:`client_num_gpus = 2`, the simulation wouldn't start (even if you " -"had 2 GPUs but decided to set 1 in :code:`ray_init_args`)." +"run concurrently. Setting ``client_num_gpus = 0.5`` would allow running " +"two clients and therefore enable them to run concurrently. Be careful not" +" to require more resources than available. If you specified " +"``client_num_gpus = 2``, the simulation wouldn't start (even if you had 2" +" GPUs but decided to set 1 in ``ray_init_args``)." msgstr "" "在上面的示例中,将只运行一个客户端,因此您的客户端不会并发运行。设置 :code:`client_num_gpus = 0.5` " "将允许运行两个客户端,从而使它们能够并发运行。请注意,所需的资源不要超过可用资源。如果您指定 :code:`client_num_gpus = " "2`,模拟将无法启动(即使您有 2 个 GPU,但决定在 :code:`ray_init_args` 中设置为 1)。" -#: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 +#: ../../source/how-to-monitor-simulation.rst:235 ../../source/ref-faq.rst:2 msgid "FAQ" msgstr "常见问题" -#: ../../source/how-to-monitor-simulation.rst:214 +#: ../../source/how-to-monitor-simulation.rst:237 msgid "Q: I don't see any metrics logged." msgstr "问:我没有看到任何指标记录。" -#: ../../source/how-to-monitor-simulation.rst:216 +#: ../../source/how-to-monitor-simulation.rst:239 msgid "" "A: The timeframe might not be properly set. The setting is in the top " "right corner (\"Last 30 minutes\" by default). Please change the " "timeframe to reflect the period when the simulation was running." msgstr "答:时间范围可能没有正确设置。设置在右上角(默认为 \"最后 30 分钟\")。请更改时间框架,以反映模拟运行的时间段。" -#: ../../source/how-to-monitor-simulation.rst:218 +#: ../../source/how-to-monitor-simulation.rst:243 msgid "" "Q: I see “Grafana server not detected. Please make sure the Grafana " "server is running and refresh this page” after going to the Metrics tab " "in Ray Dashboard." msgstr "问:我看到 \"未检测到 Grafana 服务器。请确保 Grafana 服务器正在运行并刷新此页面\"。" -#: ../../source/how-to-monitor-simulation.rst:220 +#: ../../source/how-to-monitor-simulation.rst:246 msgid "" "A: You probably don't have Grafana running. 
Please check the running " "services" msgstr "答:您可能没有运行 Grafana。请检查正在运行的服务" -#: ../../source/how-to-monitor-simulation.rst:226 +#: ../../source/how-to-monitor-simulation.rst:252 +#, fuzzy msgid "" "Q: I see \"This site can't be reached\" when going to " -"``_." +"http://127.0.0.1:8265." msgstr "问:在访问 ``_时,我看到 \"无法访问该网站\"。" -#: ../../source/how-to-monitor-simulation.rst:228 +#: ../../source/how-to-monitor-simulation.rst:254 msgid "" "A: Either the simulation has already finished, or you still need to start" " Prometheus." msgstr "答:要么模拟已经完成,要么您还需要启动Prometheus。" -#: ../../source/how-to-monitor-simulation.rst:232 +#: ../../source/how-to-monitor-simulation.rst:257 msgid "Resources" msgstr "资源" -#: ../../source/how-to-monitor-simulation.rst:234 +#: ../../source/how-to-monitor-simulation.rst:259 #, fuzzy msgid "" -"Ray Dashboard: ``_" +"Ray Dashboard: https://docs.ray.io/en/latest/ray-observability/getting-" +"started.html" msgstr "Ray 仪表盘: ``_" -#: ../../source/how-to-monitor-simulation.rst:236 +#: ../../source/how-to-monitor-simulation.rst:261 #, fuzzy -msgid "Ray Metrics: ``_" +msgid "Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html" msgstr "" "Ray 指标: ``_" @@ -7055,23 +7318,24 @@ msgstr "" "FL 工作负载的一些案例。Flower 可以通过其 \"虚拟客户端引擎\"(VirtualClientEngine)_或 VCE 来匹配这些情况。" -#: ../../source/how-to-run-simulations.rst:10 +#: ../../source/how-to-run-simulations.rst:19 +#, fuzzy msgid "" -"The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" -" clients. These clients are identical to `non-virtual` clients (i.e. the " +"The ``VirtualClientEngine`` schedules, launches and manages `virtual` " +"clients. These clients are identical to `non-virtual` clients (i.e. the " "ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " "creating a class inheriting, for example, from `flwr.client.NumPyClient " "`_ and therefore behave in an " "identical way. 
In addition to that, clients managed by the " -":code:`VirtualClientEngine` are:" +"``VirtualClientEngine`` are:" msgstr "" ":code:`VirtualClientEngine`用来规划,启动和管理`虚拟`客户端。这些客户端跟`非虚拟`客户端是一样的(即为您通过`flwr.client.start_client" " `_启动的客户端),因为它们可以通过创建一个继承自 " "`flwr.client.NumPyClient `_ " "的类进行配置,因此其行为方式相同。另外,由 `VirtualClientEngine` 管理的客户端有:" -#: ../../source/how-to-run-simulations.rst:12 +#: ../../source/how-to-run-simulations.rst:26 msgid "" "resource-aware: this means that each client gets assigned a portion of " "the compute and memory on your system. You as a user can control this at " @@ -7082,14 +7346,15 @@ msgstr "" "资源感知:这意味着每个客户端都会分配到系统中的一部分计算和内存。作为用户,您可以在模拟开始时对其进行控制,从而控制 Flower FL " "模拟的并行程度。每个客户端的资源越少,在同一硬件上并发运行的客户端就越多。" -#: ../../source/how-to-run-simulations.rst:13 +#: ../../source/how-to-run-simulations.rst:31 +#, fuzzy msgid "" "self-managed: this means that you as a user do not need to launch clients" -" manually, instead this gets delegated to :code:`VirtualClientEngine`'s " +" manually, instead this gets delegated to ``VirtualClientEngine``'s " "internals." msgstr "自管理:这意味着用户无需手动启动客户端,而是由 :code:`VirtualClientEngine` 负责。" -#: ../../source/how-to-run-simulations.rst:14 +#: ../../source/how-to-run-simulations.rst:33 msgid "" "ephemeral: this means that a client is only materialized when it is " "required in the FL process (e.g. to do `fit() `_ " ")。之后该对象将被销毁,释放分配给它的资源,并允许其他客户端以这种方式参与。" -#: ../../source/how-to-run-simulations.rst:16 +#: ../../source/how-to-run-simulations.rst:38 +#, fuzzy msgid "" -"The :code:`VirtualClientEngine` implements `virtual` clients using `Ray " +"The ``VirtualClientEngine`` implements `virtual` clients using `Ray " "`_, an open-source framework for scalable Python " -"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use " -"of `Actors `_ to " -"spawn `virtual` clients and run their workload." +"workloads. 
In particular, Flower's ``VirtualClientEngine`` makes use of " +"`Actors `_ to spawn " +"`virtual` clients and run their workload." msgstr "" ":code:`VirtualClientEngine`使用`Ray " "`_来实现`虚拟`客户端,这是一个用于可扩展 Python 工作负载的开源框架。特别地,Flower 的" " :code:`VirtualClientEngine` 使用 `Actors `_ 来生成 `virtual` 客户端并运行它们的工作负载。" -#: ../../source/how-to-run-simulations.rst:20 +#: ../../source/how-to-run-simulations.rst:45 msgid "Launch your Flower simulation" msgstr "启动 Flower 模拟" -#: ../../source/how-to-run-simulations.rst:22 +#: ../../source/how-to-run-simulations.rst:47 msgid "" "Running Flower simulations still require you to define your client class," " a strategy, and utility functions to download and load (and potentially " @@ -7131,22 +7397,23 @@ msgstr "" "\"start_simulation `_\" 来启动模拟了,一个最简单的示例如下:" -#: ../../source/how-to-run-simulations.rst:44 +#: ../../source/how-to-run-simulations.rst:73 msgid "VirtualClientEngine resources" msgstr "虚拟客户端引擎资源" -#: ../../source/how-to-run-simulations.rst:45 +#: ../../source/how-to-run-simulations.rst:75 +#, fuzzy msgid "" "By default the VCE has access to all system resources (i.e. all CPUs, all" " GPUs, etc) since that is also the default behavior when starting Ray. " "However, in some settings you might want to limit how many of your system" " resources are used for simulation. You can do this via the " -":code:`ray_init_args` input argument to :code:`start_simulation` which " -"the VCE internally passes to Ray's :code:`ray.init` command. For a " -"complete list of settings you can configure check the `ray.init " +"``ray_init_args`` input argument to ``start_simulation`` which the VCE " +"internally passes to Ray's ``ray.init`` command. For a complete list of " +"settings you can configure check the `ray.init " "`_" -" documentation. Do not set :code:`ray_init_args` if you want the VCE to " -"use all your system's CPUs and GPUs." +" documentation. Do not set ``ray_init_args`` if you want the VCE to use " +"all your system's CPUs and GPUs." 
msgstr "" "默认情况下,VCE 可以访问所有系统资源(即所有 CPU、所有 GPU 等),因为这也是启动 Ray " "时的默认行为。不过,在某些设置中,您可能希望限制有多少系统资源用于模拟。您可以通过 :code:`ray_init_args` 输入到 " @@ -7155,20 +7422,21 @@ msgstr "" "core/api/doc/ray.init.html#ray-init>`_ 文档。如果希望 VCE 使用系统中所有的 CPU 和 " "GPU,请不要设置 :code:`ray_init_args`。" -#: ../../source/how-to-run-simulations.rst:62 +#: ../../source/how-to-run-simulations.rst:97 msgid "Assigning client resources" msgstr "分配客户端资源" -#: ../../source/how-to-run-simulations.rst:63 +#: ../../source/how-to-run-simulations.rst:99 +#, fuzzy msgid "" -"By default the :code:`VirtualClientEngine` assigns a single CPU core (and" -" nothing else) to each virtual client. This means that if your system has" -" 10 cores, that many virtual clients can be concurrently running." +"By default the ``VirtualClientEngine`` assigns a single CPU core (and " +"nothing else) to each virtual client. This means that if your system has " +"10 cores, that many virtual clients can be concurrently running." msgstr "" "默认情况下,:code:`VirtualClientEngine` 会为每个虚拟客户端分配一个 CPU " "内核(不分配其他任何内核)。这意味着,如果系统有 10 个内核,那么可以同时运行这么多虚拟客户端。" -#: ../../source/how-to-run-simulations.rst:65 +#: ../../source/how-to-run-simulations.rst:103 msgid "" "More often than not, you would probably like to adjust the resources your" " clients get assigned based on the complexity (i.e. compute and memory " @@ -7183,31 +7451,32 @@ msgstr "" "flwr.html#flwr.simulation.start_simulation>`_ 。Ray " "内部使用两个键来调度和生成工作负载(在我们的例子中是 Flower 客户端):" -#: ../../source/how-to-run-simulations.rst:67 -msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." +#: ../../source/how-to-run-simulations.rst:110 +#, fuzzy +msgid "``num_cpus`` indicates the number of CPU cores a client would get." msgstr ":code:`num_cpus` 表示客户端将获得的 CPU 内核数量。" -#: ../../source/how-to-run-simulations.rst:68 -msgid "" -":code:`num_gpus` indicates the **ratio** of GPU memory a client gets " -"assigned." 
+#: ../../source/how-to-run-simulations.rst:111 +#, fuzzy +msgid "``num_gpus`` indicates the **ratio** of GPU memory a client gets assigned." msgstr ":code:`num_gpus` 表示分配给客户端的 GPU 内存的**比例**。" -#: ../../source/how-to-run-simulations.rst:70 +#: ../../source/how-to-run-simulations.rst:113 msgid "Let's see a few examples:" msgstr "让我们来看几个例子:" -#: ../../source/how-to-run-simulations.rst:89 +#: ../../source/how-to-run-simulations.rst:132 +#, fuzzy msgid "" -"While the :code:`client_resources` can be used to control the degree of " +"While the ``client_resources`` can be used to control the degree of " "concurrency in your FL simulation, this does not stop you from running " "dozens, hundreds or even thousands of clients in the same round and " "having orders of magnitude more `dormant` (i.e. not participating in a " "round) clients. Let's say you want to have 100 clients per round but your" " system can only accommodate 8 clients concurrently. The " -":code:`VirtualClientEngine` will schedule 100 jobs to run (each " -"simulating a client sampled by the strategy) and then will execute them " -"in a resource-aware manner in batches of 8." +"``VirtualClientEngine`` will schedule 100 jobs to run (each simulating a " +"client sampled by the strategy) and then will execute them in a resource-" +"aware manner in batches of 8." 
msgstr "" "虽然 :code:`client_resources` 可用来控制 FL " "模拟的并发程度,但这并不能阻止您在同一轮模拟中运行几十、几百甚至上千个客户端,并拥有数量级更多的 " @@ -7215,7 +7484,7 @@ msgstr "" "个客户端。:code:`VirtualClientEngine` 将安排运行 100 " "个工作(每个工作模拟策略采样的一个客户端),然后以资源感知的方式分批执行。" -#: ../../source/how-to-run-simulations.rst:91 +#: ../../source/how-to-run-simulations.rst:140 msgid "" "To understand all the intricate details on how resources are used to " "schedule FL clients and how to define custom resources, please take a " @@ -7225,11 +7494,11 @@ msgstr "" "要了解资源如何用于调度 FL 客户端以及如何定义自定义资源的所有复杂细节,请查看 `Ray 文档 " "`_。" -#: ../../source/how-to-run-simulations.rst:94 +#: ../../source/how-to-run-simulations.rst:145 msgid "Simulation examples" msgstr "模拟示例" -#: ../../source/how-to-run-simulations.rst:96 +#: ../../source/how-to-run-simulations.rst:147 msgid "" "A few ready-to-run complete examples for Flower simulation in " "Tensorflow/Keras and PyTorch are provided in the `Flower repository " @@ -7238,7 +7507,7 @@ msgstr "" "在 Tensorflow/Keras 和 PyTorch 中进行 Flower 模拟的几个可随时运行的完整示例已在 `Flower 库 " "`_ 中提供。您也可以在 Google Colab 上运行它们:" -#: ../../source/how-to-run-simulations.rst:98 +#: ../../source/how-to-run-simulations.rst:151 msgid "" "`Tensorflow/Keras Simulation " "`_:100个客户端在MNIST上协作训练一个MLP模型。" -#: ../../source/how-to-run-simulations.rst:99 +#: ../../source/how-to-run-simulations.rst:154 msgid "" "`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " @@ -7256,118 +7525,123 @@ msgstr "" "PyTorch 模拟 `_:100 个客户端在 MNIST 上协作训练一个 CNN 模型。" -#: ../../source/how-to-run-simulations.rst:104 +#: ../../source/how-to-run-simulations.rst:159 msgid "Multi-node Flower simulations" msgstr "多节点 Flower 模拟" -#: ../../source/how-to-run-simulations.rst:106 +#: ../../source/how-to-run-simulations.rst:161 +#, fuzzy msgid "" -"Flower's :code:`VirtualClientEngine` allows you to run FL simulations " -"across multiple compute nodes. 
Before starting your multi-node simulation" -" ensure that you:" +"Flower's ``VirtualClientEngine`` allows you to run FL simulations across " +"multiple compute nodes. Before starting your multi-node simulation ensure" +" that you:" msgstr "Flower 的 :code:`VirtualClientEngine` 允许您在多个计算节点上运行 FL 模拟。在开始多节点模拟之前,请确保:" -#: ../../source/how-to-run-simulations.rst:108 +#: ../../source/how-to-run-simulations.rst:164 msgid "Have the same Python environment in all nodes." msgstr "所有节点都有相同的 Python 环境。" -#: ../../source/how-to-run-simulations.rst:109 +#: ../../source/how-to-run-simulations.rst:165 msgid "Have a copy of your code (e.g. your entire repo) in all nodes." msgstr "在所有节点上都有一份代码副本(例如整个软件包)。" -#: ../../source/how-to-run-simulations.rst:110 +#: ../../source/how-to-run-simulations.rst:166 msgid "" "Have a copy of your dataset in all nodes (more about this in " ":ref:`simulation considerations `)" msgstr "在所有节点中都有一份数据集副本(更多相关信息请参阅 :ref:`模拟注意事项`)" -#: ../../source/how-to-run-simulations.rst:111 +#: ../../source/how-to-run-simulations.rst:168 +#, fuzzy msgid "" -"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation " -"`_ so the " -":code:`VirtualClientEngine` attaches to a running Ray instance." +"Pass ``ray_init_args={\"address\"=\"auto\"}`` to `start_simulation `_ so the " +"``VirtualClientEngine`` attaches to a running Ray instance." msgstr "" "将 :code:`ray_init_args={\"address\"=\"auto\"}`传递给 `start_simulation `_ ,这样 " ":code:`VirtualClientEngine`就会连接到正在运行的 Ray 实例。" -#: ../../source/how-to-run-simulations.rst:112 +#: ../../source/how-to-run-simulations.rst:171 +#, fuzzy msgid "" -"Start Ray on you head node: on the terminal type :code:`ray start " -"--head`. This command will print a few lines, one of which indicates how " -"to attach other nodes to the head node." +"Start Ray on you head node: on the terminal type ``ray start --head``. " +"This command will print a few lines, one of which indicates how to attach" +" other nodes to the head node." 
msgstr "" "在头部节点上启动 Ray:在终端上输入 :code:`raystart--" "head`。该命令将打印几行输出,其中一行说明如何将其他节点连接到头部节点。" -#: ../../source/how-to-run-simulations.rst:113 +#: ../../source/how-to-run-simulations.rst:174 +#, fuzzy msgid "" "Attach other nodes to the head node: copy the command shown after " "starting the head and execute it on terminal of a new node: for example " -":code:`ray start --address='192.168.1.132:6379'`" +"``ray start --address='192.168.1.132:6379'``" msgstr "" "将其他节点附加到头部节点:复制启动头部后显示的命令,并在新节点的终端上执行:例如 :code:`ray start " "--address='192.168.1.132:6379'`" -#: ../../source/how-to-run-simulations.rst:115 +#: ../../source/how-to-run-simulations.rst:178 msgid "" "With all the above done, you can run your code from the head node as you " "would if the simulation was running on a single node." msgstr "完成上述所有操作后,您就可以在头部节点上运行代码了,就像在单个节点上运行模拟一样。" -#: ../../source/how-to-run-simulations.rst:117 +#: ../../source/how-to-run-simulations.rst:181 +#, fuzzy msgid "" "Once your simulation is finished, if you'd like to dismantle your cluster" -" you simply need to run the command :code:`ray stop` in each node's " -"terminal (including the head node)." +" you simply need to run the command ``ray stop`` in each node's terminal " +"(including the head node)." msgstr "模拟结束后,如果要拆除集群,只需在每个节点(包括头部节点)的终端运行 :code:`ray stop` 命令即可。" -#: ../../source/how-to-run-simulations.rst:120 +#: ../../source/how-to-run-simulations.rst:185 msgid "Multi-node simulation good-to-know" msgstr "了解多节点模拟" -#: ../../source/how-to-run-simulations.rst:122 +#: ../../source/how-to-run-simulations.rst:187 msgid "" "Here we list a few interesting functionality when running multi-node FL " "simulations:" msgstr "在此,我们列举了运行多节点 FL 模拟时的一些有趣功能:" -#: ../../source/how-to-run-simulations.rst:124 +#: ../../source/how-to-run-simulations.rst:189 +#, fuzzy msgid "" -"User :code:`ray status` to check all nodes connected to your head node as" -" well as the total resources available to the " -":code:`VirtualClientEngine`." 
+"User ``ray status`` to check all nodes connected to your head node as " +"well as the total resources available to the ``VirtualClientEngine``." msgstr "" "使用 :code:`ray status` 查看连接到头部节点的所有节点,以及 :code:`VirtualClientEngine` " "可用的总资源。" -#: ../../source/how-to-run-simulations.rst:126 +#: ../../source/how-to-run-simulations.rst:192 +#, fuzzy msgid "" "When attaching a new node to the head, all its resources (i.e. all CPUs, " "all GPUs) will be visible by the head node. This means that the " -":code:`VirtualClientEngine` can schedule as many `virtual` clients as " -"that node can possible run. In some settings you might want to exclude " -"certain resources from the simulation. You can do this by appending " -"`--num-cpus=` and/or `--num-" -"gpus=` in any :code:`ray start` command (including " -"when starting the head)" +"``VirtualClientEngine`` can schedule as many `virtual` clients as that " +"node can possible run. In some settings you might want to exclude certain" +" resources from the simulation. You can do this by appending `--num-" +"cpus=` and/or `--num-gpus=` in " +"any ``ray start`` command (including when starting the head)" msgstr "" "将新节点附加到头部节点时,头部节点将可见其所有资源(即所有 CPU 和 GPU)。这意味着 :code:`VirtualClientEngine`" " 可以调度尽可能多的 \"虚拟 \"客户端来运行该节点。在某些设置中,您可能希望将某些资源排除在模拟之外。为此,您可以在任何 :code:`ray" " start` 命令(包括启动头部时)中添加 `--num-cpus=`和/或 `--num-" "gpus=`" -#: ../../source/how-to-run-simulations.rst:132 +#: ../../source/how-to-run-simulations.rst:202 msgid "Considerations for simulations" msgstr "模拟的注意事项" -#: ../../source/how-to-run-simulations.rst:135 +#: ../../source/how-to-run-simulations.rst:206 msgid "" "We are actively working on these fronts so to make it trivial to run any " "FL workload with Flower simulation." 
msgstr "我们正在积极开展这些方面的工作,以便使 FL 工作负载与 Flower 模拟的运行变得轻而易举。" -#: ../../source/how-to-run-simulations.rst:138 +#: ../../source/how-to-run-simulations.rst:209 msgid "" "The current VCE allows you to run Federated Learning workloads in " "simulation mode whether you are prototyping simple scenarios on your " @@ -7381,36 +7655,38 @@ msgstr "" " FL情景。虽然我们为 VCE 增加了更多的功能,但以下几点强调了在使用 Flower 设计 FL " "时需要注意的一些事项。我们还强调了我们的实现中目前存在的一些局限性。" -#: ../../source/how-to-run-simulations.rst:141 +#: ../../source/how-to-run-simulations.rst:217 msgid "GPU resources" msgstr "GPU 资源" -#: ../../source/how-to-run-simulations.rst:143 +#: ../../source/how-to-run-simulations.rst:219 +#, fuzzy msgid "" "The VCE assigns a share of GPU memory to a client that specifies the key " -":code:`num_gpus` in :code:`client_resources`. This being said, Ray (used " +"``num_gpus`` in ``client_resources``. This being said, Ray (used " "internally by the VCE) is by default:" msgstr "" "VCE 会为指定 :code:`client_resources` 中 :code:`num_gpus` 关键字的客户端分配 GPU " "内存份额。也就是说,Ray(VCE 内部使用)是默认的:" -#: ../../source/how-to-run-simulations.rst:146 +#: ../../source/how-to-run-simulations.rst:222 +#, fuzzy msgid "" "not aware of the total VRAM available on the GPUs. This means that if you" -" set :code:`num_gpus=0.5` and you have two GPUs in your system with " -"different (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients" -" concurrently." +" set ``num_gpus=0.5`` and you have two GPUs in your system with different" +" (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients " +"concurrently." msgstr "" "不知道 GPU 上可用的总 VRAM。这意味着,如果您设置 :code:`num_gpus=0.5`,而系统中有两个不同(如 32GB 和 " "8GB)VRAM 的 GPU,它们都将同时运行 2 个客户端。" -#: ../../source/how-to-run-simulations.rst:147 +#: ../../source/how-to-run-simulations.rst:225 msgid "" "not aware of other unrelated (i.e. not created by the VCE) workloads are " "running on the GPU. 
Two takeaways from this are:" msgstr "不知道 GPU 上正在运行其他无关(即不是由 VCE 创建)的工作负载。从中可以得到以下两点启示:" -#: ../../source/how-to-run-simulations.rst:149 +#: ../../source/how-to-run-simulations.rst:228 msgid "" "Your Flower server might need a GPU to evaluate the `global model` after " "aggregation (by instance when making use of the `evaluate method `_时)" -#: ../../source/how-to-run-simulations.rst:150 +#: ../../source/how-to-run-simulations.rst:231 +#, fuzzy msgid "" "If you want to run several independent Flower simulations on the same " "machine you need to mask-out your GPUs with " -":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching your " -"experiment." +"``CUDA_VISIBLE_DEVICES=\"\"`` when launching your experiment." msgstr "" "如果您想在同一台机器上运行多个独立的 Flower 模拟,则需要在启动实验时使用 " ":code:`CUDA_VISIBLE_DEVICES=\"\"` 屏蔽 GPU。" -#: ../../source/how-to-run-simulations.rst:153 +#: ../../source/how-to-run-simulations.rst:235 +#, fuzzy msgid "" -"In addition, the GPU resource limits passed to :code:`client_resources` " -"are not `enforced` (i.e. they can be exceeded) which can result in the " +"In addition, the GPU resource limits passed to ``client_resources`` are " +"not `enforced` (i.e. they can be exceeded) which can result in the " "situation of client using more VRAM than the ratio specified when " "starting the simulation." 
msgstr "" "此外,传递给 :code:`client_resources` 的 GPU 资源限制并不是 \"强制 \"的(即可以超出),这可能导致客户端使用的" " VRAM 超过启动模拟时指定的比例。" -#: ../../source/how-to-run-simulations.rst:156 +#: ../../source/how-to-run-simulations.rst:240 msgid "TensorFlow with GPUs" msgstr "使用 GPU 的 TensorFlow" -#: ../../source/how-to-run-simulations.rst:158 +#: ../../source/how-to-run-simulations.rst:242 msgid "" "When `using a GPU with TensorFlow " "`_ nearly your entire GPU memory of" @@ -7459,20 +7736,21 @@ msgstr "" "\"客户端,这并不是一个理想的机制。幸运的是,我们可以通过 `启用内存增长 " "`_来禁用这一默认行为。" -#: ../../source/how-to-run-simulations.rst:160 +#: ../../source/how-to-run-simulations.rst:249 +#, fuzzy msgid "" "This would need to be done in the main process (which is where the server" " would run) and in each Actor created by the VCE. By means of " -":code:`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` " -"in order to specify a function to be executed upon actor initialization. " -"In this case, to enable GPU growth for TF workloads. It would look as " +"``actor_kwargs`` we can pass the reserved key `\"on_actor_init_fn\"` in " +"order to specify a function to be executed upon actor initialization. In " +"this case, to enable GPU growth for TF workloads. It would look as " "follows:" msgstr "" "这需要在主进程(也就是服务器运行的地方)和 VCE 创建的每个角色中完成。通过 " ":code:`actor_kwargs`,我们可以传递保留关键字`\"on_actor_init_fn\"`,以指定在角色初始化时执行的函数。在本例中,为了使" " TF 工作负载的 GPU 增长,它看起来如下:" -#: ../../source/how-to-run-simulations.rst:179 +#: ../../source/how-to-run-simulations.rst:272 msgid "" "This is precisely the mechanism used in `Tensorflow/Keras Simulation " "`_\"示例中使用的机制。" -#: ../../source/how-to-run-simulations.rst:183 +#: ../../source/how-to-run-simulations.rst:276 msgid "Multi-node setups" msgstr "多节点设置" -#: ../../source/how-to-run-simulations.rst:185 +#: ../../source/how-to-run-simulations.rst:278 msgid "" "The VCE does not currently offer a way to control on which node a " "particular `virtual` client is executed. 
In other words, if more than a " @@ -7504,7 +7782,7 @@ msgstr "" "进程的稍后阶段(即在另一轮中),同一客户端可以由不同的节点执行。根据客户访问数据集的方式,这可能需要在所有节点上复制所有数据集分区,或采用数据集服务机制(如使用" " nfs 或数据库)来避免数据重复。" -#: ../../source/how-to-run-simulations.rst:187 +#: ../../source/how-to-run-simulations.rst:286 msgid "" "By definition virtual clients are `stateless` due to their ephemeral " "nature. A client state can be implemented as part of the Flower client " @@ -7534,17 +7812,17 @@ msgid "Model checkpointing" msgstr "模型检查点" #: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +#, fuzzy msgid "" "Model updates can be persisted on the server-side by customizing " -":code:`Strategy` methods. Implementing custom strategies is always an " -"option, but for many cases it may be more convenient to simply customize " -"an existing strategy. The following code example defines a new " -":code:`SaveModelStrategy` which customized the existing built-in " -":code:`FedAvg` strategy. In particular, it customizes " -":code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class " -"(:code:`FedAvg`). It then continues to save returned (aggregated) weights" -" before it returns those aggregated weights to the caller (i.e., the " -"server):" +"``Strategy`` methods. Implementing custom strategies is always an option," +" but for many cases it may be more convenient to simply customize an " +"existing strategy. The following code example defines a new " +"``SaveModelStrategy`` which customized the existing built-in ``FedAvg`` " +"strategy. In particular, it customizes ``aggregate_fit`` by calling " +"``aggregate_fit`` in the base class (``FedAvg``). 
It then continues to " +"save returned (aggregated) weights before it returns those aggregated " +"weights to the caller (i.e., the server):" msgstr "" "模型更新可通过自定义 :code:`Strategy` " "方法在服务器端持久化。实现自定义策略始终是一种选择,但在许多情况下,简单地自定义现有策略可能更方便。下面的代码示例定义了一个新的 " @@ -7552,11 +7830,11 @@ msgstr "" "策略。特别是,它通过调用基类(:code:`FedAvg`)中的 :code:`aggregate_fit` 来定制 " ":code:`aggregate_fit`。然后继续保存返回的(聚合)参数,然后再将这些聚合参数返回给调用者(即服务器):" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:47 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:53 msgid "Save and load PyTorch checkpoints" msgstr "保存和加载 PyTorch 检查点" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:49 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:55 msgid "" "Similar to the previous example but with a few extra steps, we'll show " "how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " @@ -7569,14 +7847,14 @@ msgstr "" "函数。首先,``aggregate_fit`` 返回一个 ``Parameters`` 对象,它必须被转换成一个 NumPy " "``ndarray`` 的列表,然后这些对象按照 ``OrderedDict`` 类结构被转换成 PyTorch `state_dict` 对象。" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:85 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:98 msgid "" "To load your progress, you simply append the following lines to your " "code. 
Note that this will iterate over all saved checkpoints and load the" " latest one:" msgstr "要加载进度,只需在代码中添加以下几行。请注意,这将遍历所有已保存的检查点,并加载最新的检查点:" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:97 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:111 #, fuzzy msgid "" "Return/use this object of type ``Parameters`` wherever necessary, such as" @@ -7599,34 +7877,34 @@ msgstr "" "Flower 1.0 正式发布。除了新功能,Flower 1.0 还为未来的发展奠定了稳定的基础。与 Flower 0.19(以及其他 0.x " "系列版本)相比,有一些破坏性改动需要修改现有 0.x 系列项目的代码。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:8 -#: ../../source/how-to-upgrade-to-flower-next.rst:43 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-upgrade-to-flower-next.rst:63 msgid "Install update" msgstr "安装更新" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 msgid "" "Here's how to update an existing installation to Flower 1.0 using either " "pip or Poetry:" msgstr "下面介绍如何使用 pip 或 Poetry 将现有安装更新到 Flower 1.0:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 msgid "pip: add ``-U`` when installing." msgstr "pip: 安装时添加 ``-U``." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:16 msgid "" "``python -m pip install -U flwr`` (when using ``start_server`` and " "``start_client``)" msgstr "`python -m pip install -U flwr``(当使用`start_server`和`start_client`时)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:15 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 msgid "" "``python -m pip install -U 'flwr[simulation]'`` (when using " "``start_simulation``)" msgstr "``python -m pip install -U 'flwr[simulation]'``(当使用`start_simulation``时)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 msgid "" "Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " "reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " @@ -7635,11 +7913,11 @@ msgstr "" "Poetry:更新 ``pyproject.toml`` 中的 ``flwr`` 依赖包,然后重新安装(运行 ``poetry install``" " 前,别忘了通过 ``rm poetry.lock` 删除 ``poetry.lock`)。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:23 msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" msgstr "``flwr = \"^1.0.0\"`` (当使用 ``start_server` 和 ``start_client` 时)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:20 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 msgid "" "``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " "using ``start_simulation``)" @@ -7647,32 +7925,32 @@ msgstr "" "``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] " "}``(当使用``start_simulation``时)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 -#: ../../source/how-to-upgrade-to-flower-next.rst:100 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:28 +#: ../../source/how-to-upgrade-to-flower-next.rst:120 msgid "Required changes" msgstr "所需变更" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:30 msgid "The following breaking changes 
require manual updates." msgstr "以下更改需要手动更新。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:29 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 msgid "General" msgstr "一般情况" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:31 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:35 msgid "" "Pass all arguments as keyword arguments (not as positional arguments). " "Here's an example:" msgstr "将所有参数作为关键字参数传递(而不是位置参数)。下面是一个例子:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:38 msgid "" "Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " "FlowerClient())``" msgstr "Flower 0.19 (位置参数): ``start_client(\"127.0.0.1:8080\", FlowerClient())``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:34 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 msgid "" "Flower 1.0 (keyword arguments): " "``start_client(server_address=\"127.0.0.1:8080\", " @@ -7681,12 +7959,12 @@ msgstr "" "Flower 1.0(关键字参数): ``start_client(server_address=\"127.0.0.1:8080\", " "client=FlowerClient())``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 #: ../../source/ref-api/flwr.client.Client.rst:2 msgid "Client" msgstr "客户端" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 msgid "" "Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " "``def get_parameters(self, config):``" @@ -7694,7 +7972,7 @@ msgstr "" "NumPyClient的子类:将``def get_parameters(self):```改为``def " "get_parameters(self,config):``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:40 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 msgid "" "Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " "get_parameters(self, ins: GetParametersIns):``" @@ -7702,11 +7980,11 @@ msgstr "" "客户端 \"的子类:将 \"get_parameters(self): \"改为 \"get_parameters(self, ins: " "GetParametersIns):\"" -#: 
../../source/how-to-upgrade-to-flower-1.0.rst:43 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 msgid "Strategies / ``start_server`` / ``start_simulation``" msgstr "策略 / ``start_server`` / ``start_simulation``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:53 msgid "" "Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " "``start_simulation``. Here's an example:" @@ -7714,7 +7992,7 @@ msgstr "" "向 ``start_server`` 和 ``start_simulation` 传递 ``ServerConfig``(而不是 " "dictionary)。下面是一个例子:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:56 msgid "" "Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " "\"round_timeout\": 600.0}, ...)``" @@ -7722,7 +8000,7 @@ msgstr "" "Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " "\"round_timeout\": 600.0}, ...)``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 msgid "" "Flower 1.0: ``start_server(..., " "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " @@ -7732,13 +8010,13 @@ msgstr "" "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:50 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 msgid "" "Replace ``num_rounds=1`` in ``start_simulation`` with the new " "``config=ServerConfig(...)`` (see previous item)" msgstr "将`start_simulation``中的`num_rounds=1``替换为新的`config=ServerConfig(...)`(参见前一项)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:63 msgid "" "Remove ``force_final_distributed_eval`` parameter from calls to " "``start_server``. 
Distributed evaluation on all clients can be enabled by" @@ -7748,19 +8026,19 @@ msgstr "" "删除调用 ``start_server`` 时的 ``force_final_distributed_eval` " "参数。可以通过配置策略,在最后一轮训练后对所有客户端进行抽样评估,从而启用对所有客户端的分布式评估。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:66 msgid "Rename parameter/ndarray conversion functions:" msgstr "重命名参数/数组转换函数:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:55 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:69 msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" msgstr "``weights_to_parameters`` --> ``ndarrays_to_parameters``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:57 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 msgid "" "Strategy initialization: if the strategy relies on the default values for" " ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " @@ -7774,23 +8052,23 @@ msgstr "" "``start_server` 或 ``start_simulation` 时未传递策略实例)现在应手动初始化 FedAvg,并将 " "`fraction_fit` 和 `fraction_evaluate` 设为 `0.1``。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" msgstr "重命名内置策略参数(例如,`FedAvg``):" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:79 msgid "``fraction_eval`` --> ``fraction_evaluate``" msgstr "``fraction_eval`` --> ``fraction_evaluate``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 msgid "``min_eval_clients`` --> ``min_evaluate_clients``" msgstr "``min_eval_clients`` --> ``min_evaluate_clients``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 +#: 
../../source/how-to-upgrade-to-flower-1.0.rst:81 msgid "``eval_fn`` --> ``evaluate_fn``" msgstr "``eval_fn`` --> ``evaluate_fn``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:64 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:83 msgid "" "Rename ``rnd`` to ``server_round``. This impacts multiple methods and " "functions, for example, ``configure_fit``, ``aggregate_fit``, " @@ -7800,11 +8078,11 @@ msgstr "" "``configure_fit``、``aggregate_fit``、``configure_evaluate``、`aggregate_evaluate``" " 和 ``evaluate_fn``。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:86 msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" msgstr "在 ``evaluate_fn` 中添加 ``server_round` 和 ``config`:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:88 msgid "" "Flower 0.19: ``def evaluate(parameters: NDArrays) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" @@ -7812,7 +8090,7 @@ msgstr "" "Flower 0.19: ``def evaluate(parameters: NDArrays) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:90 msgid "" "Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " "config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " @@ -7822,11 +8100,11 @@ msgstr "" "config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " "Scalar]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:94 msgid "Custom strategies" msgstr "定制策略" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:73 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:96 msgid "" "The type of parameter ``failures`` has changed from " "``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " @@ -7839,13 +8117,13 @@ msgstr "" "BaseException]]``(在``agregate_fit``中)和``List[Union[Tuple[ClientProxy, " "EvaluateRes], 
BaseException]]``(在``agregate_evaluate``中)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:74 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:100 msgid "" "The ``Strategy`` method ``evaluate`` now receives the current round of " "federated learning/evaluation as the first parameter:" msgstr "``Strategy``方法 的``evaluate``现在会接收当前一轮联邦学习/评估作为第一个参数:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:76 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:103 msgid "" "Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:``" @@ -7853,7 +8131,7 @@ msgstr "" "Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " "Optional[Tuple[float, Dict[str, Scalar]]]:```" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:105 msgid "" "Flower 1.0: ``def evaluate(self, server_round: int, parameters: " "Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" @@ -7861,17 +8139,17 @@ msgstr "" "Flower 1.0: ``def evaluate(self, server_round: int, parameters: " "Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:109 msgid "Optional improvements" msgstr "可选的改进措施" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:82 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:111 msgid "" "Along with the necessary changes above, there are a number of potential " "improvements that just became possible:" msgstr "除了上述必要的改动之外,还有一些潜在的改进措施:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:84 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:114 msgid "" "Remove \"placeholder\" methods from subclasses of ``Client`` or " "``NumPyClient``. 
If you, for example, use server-side evaluation, then " @@ -7881,7 +8159,7 @@ msgstr "" "删除 ``Client`` 或 ``NumPyClient`` 子类中的 \"占位符 " "\"方法。例如,如果你使用服务器端评估,那么就不再需要``evaluate``的 \"空占位符 \"实现。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:85 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:117 msgid "" "Configure the round timeout via ``start_simulation``: " "``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " @@ -7891,12 +8169,12 @@ msgstr "" "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 -#: ../../source/how-to-upgrade-to-flower-next.rst:317 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:121 +#: ../../source/how-to-upgrade-to-flower-next.rst:348 msgid "Further help" msgstr "更多帮助" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:91 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:123 msgid "" "Most official `Flower code examples " "`_ are already updated" @@ -7925,7 +8203,7 @@ msgstr "" "欢迎阅读从 Flower 升级到 Flower Next 的迁移指南!无论您是经验丰富的用户还是刚刚开始使用 " "Flower,本指南都将帮助您顺利过渡现有设置,以利用 Flower Next 从 1.8 版开始的最新功能和改进。" -#: ../../source/how-to-upgrade-to-flower-next.rst:9 +#: ../../source/how-to-upgrade-to-flower-next.rst:11 #, fuzzy msgid "" "This guide shows how to reuse pre-``1.8`` Flower code with minimum code " @@ -7936,41 +8214,41 @@ msgstr "" "本指南展示了如何通过使用 Flower Next 中的*可兼容层*,以最小的代码改动重用```1.8```前的 Flower " "代码。在另一个指南中,我们将介绍如何使用纯 Flower Next API 端到端运行 Flower Next。" -#: ../../source/how-to-upgrade-to-flower-next.rst:13 +#: ../../source/how-to-upgrade-to-flower-next.rst:15 #, fuzzy msgid "Let's dive in!" msgstr "让我们深入了解一下!" 
-#: ../../source/how-to-upgrade-to-flower-next.rst:48 +#: ../../source/how-to-upgrade-to-flower-next.rst:68 #, fuzzy msgid "" "Here's how to update an existing installation of Flower to Flower Next " "with ``pip``:" msgstr "下面介绍如何使用 pip 或 Poetry 将现有安装更新到 Flower 1.0:" -#: ../../source/how-to-upgrade-to-flower-next.rst:54 +#: ../../source/how-to-upgrade-to-flower-next.rst:74 #, fuzzy msgid "or if you need Flower Next with simulation:" msgstr "启动 Flower 模拟" -#: ../../source/how-to-upgrade-to-flower-next.rst:61 +#: ../../source/how-to-upgrade-to-flower-next.rst:80 #, fuzzy msgid "" "Ensure you set the following version constraint in your " "``requirements.txt``" msgstr "确保在 ``requirements.txt`` 中设置了以下版本限制" -#: ../../source/how-to-upgrade-to-flower-next.rst:71 +#: ../../source/how-to-upgrade-to-flower-next.rst:90 #, fuzzy msgid "or ``pyproject.toml``:" msgstr "或 ``pyproject.toml```:" -#: ../../source/how-to-upgrade-to-flower-next.rst:82 +#: ../../source/how-to-upgrade-to-flower-next.rst:101 #, fuzzy msgid "Using Poetry" msgstr "使用 pip" -#: ../../source/how-to-upgrade-to-flower-next.rst:84 +#: ../../source/how-to-upgrade-to-flower-next.rst:103 #, fuzzy msgid "" "Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " @@ -7980,14 +8258,14 @@ msgstr "" "Poetry:更新 ``pyproject.toml`` 中的 ``flwr`` 依赖包,然后重新安装(运行 ``poetry install``" " 前,别忘了通过 ``rm poetry.lock` 删除 ``poetry.lock`)。" -#: ../../source/how-to-upgrade-to-flower-next.rst:86 +#: ../../source/how-to-upgrade-to-flower-next.rst:106 #, fuzzy msgid "" "Ensure you set the following version constraint in your " "``pyproject.toml``:" msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" -#: ../../source/how-to-upgrade-to-flower-next.rst:102 +#: ../../source/how-to-upgrade-to-flower-next.rst:122 #, fuzzy msgid "" "In Flower Next, the *infrastructure* and *application layers* have been " @@ -8006,36 +8284,36 @@ msgstr "" "并通过命令行启动它。服务器和客户端的长期运行组件被称为超级链接(SuperLink)和超级节点(SuperNode)。以下是无需手动更新的非破坏性更改,可让您以传统方式和" " Flower Next 
方式运行项目:" -#: ../../source/how-to-upgrade-to-flower-next.rst:109 +#: ../../source/how-to-upgrade-to-flower-next.rst:131 #, fuzzy msgid "|clientapp_link|_" msgstr "客户端" -#: ../../source/how-to-upgrade-to-flower-next.rst:110 +#: ../../source/how-to-upgrade-to-flower-next.rst:133 #, fuzzy msgid "" "Wrap your existing client with |clientapp_link|_ instead of launching it " "via |startclient_link|_. Here's an example:" msgstr "用 |clientapp_link|_ 封装现有客户端,而不是通过 |startclient_link|_ 启动。下面是一个例子:" -#: ../../source/how-to-upgrade-to-flower-next.rst:132 +#: ../../source/how-to-upgrade-to-flower-next.rst:156 #, fuzzy msgid "|serverapp_link|_" msgstr "服务器" -#: ../../source/how-to-upgrade-to-flower-next.rst:133 +#: ../../source/how-to-upgrade-to-flower-next.rst:158 #, fuzzy msgid "" "Wrap your existing strategy with |serverapp_link|_ instead of starting " "the server via |startserver_link|_. Here's an example:" msgstr "用 |serverapp_link|_ 包住现有策略,而不是通过 |startserver_link|_ 启动服务器。下面是一个例子:" -#: ../../source/how-to-upgrade-to-flower-next.rst:154 +#: ../../source/how-to-upgrade-to-flower-next.rst:179 #, fuzzy msgid "Deployment" msgstr "调配" -#: ../../source/how-to-upgrade-to-flower-next.rst:155 +#: ../../source/how-to-upgrade-to-flower-next.rst:181 #, fuzzy msgid "" "Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " @@ -8047,14 +8325,14 @@ msgstr "" "之前,使用 |flowernext_superlink_link|_ 运行 ``SuperLink`` 。无需将 |client.py` 和 " "`server.py` 作为 Python 脚本执行。" -#: ../../source/how-to-upgrade-to-flower-next.rst:158 +#: ../../source/how-to-upgrade-to-flower-next.rst:184 #, fuzzy msgid "" "Here's an example to start the server without HTTPS (only for " "prototyping):" msgstr "下面是一个在不使用 HTTPS 的情况下启动服务器的示例(仅用于原型开发):" -#: ../../source/how-to-upgrade-to-flower-next.rst:174 +#: ../../source/how-to-upgrade-to-flower-next.rst:200 #, fuzzy msgid "" "Here's another example to start with HTTPS. Use the ``--ssl-ca-" @@ -8063,12 +8341,12 @@ msgid "" "private key)." 
msgstr "下面是另一个使用 HTTPS 的示例。使用 ``--certificates`` 命令行参数传递路径(CA 证书、服务器证书和服务器私钥)。" -#: ../../source/how-to-upgrade-to-flower-next.rst:201 +#: ../../source/how-to-upgrade-to-flower-next.rst:228 #, fuzzy msgid "Simulation in CLI" msgstr "运行模拟" -#: ../../source/how-to-upgrade-to-flower-next.rst:202 +#: ../../source/how-to-upgrade-to-flower-next.rst:230 #, fuzzy msgid "" "Wrap your existing client and strategy with |clientapp_link|_ and " @@ -8078,7 +8356,7 @@ msgstr "" "分别用 |clientapp_link|_ 和 |serverapp_link|_ 封装现有的客户端和策略。无需再使用 " "|startsim_link|_。下面是一个示例:" -#: ../../source/how-to-upgrade-to-flower-next.rst:232 +#: ../../source/how-to-upgrade-to-flower-next.rst:263 #, fuzzy msgid "" "Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " @@ -8090,7 +8368,7 @@ msgstr "" "/``client_app`` 对象,而不是执行 Python 脚本。下面是一个示例(假定 `server_app`` 和 " "`client_app`` 对象位于 `sim.py`` 模块中):" -#: ../../source/how-to-upgrade-to-flower-next.rst:249 +#: ../../source/how-to-upgrade-to-flower-next.rst:280 #, fuzzy msgid "" "Set default resources for each |clientapp_link|_ using the ``--backend-" @@ -8100,19 +8378,19 @@ msgstr "" "使用 ``--backend-config`` 命令行参数为每个 |clientapp_link|_ 设置默认资源,而不是在 " "|startsim_link|_ 中设置 ``client_resources`` 参数。下面是一个例子:" -#: ../../source/how-to-upgrade-to-flower-next.rst:275 +#: ../../source/how-to-upgrade-to-flower-next.rst:304 #, fuzzy msgid "Simulation in a Notebook" msgstr "笔记本中的模拟" -#: ../../source/how-to-upgrade-to-flower-next.rst:276 +#: ../../source/how-to-upgrade-to-flower-next.rst:306 #, fuzzy msgid "" "Run |runsim_link|_ in your notebook instead of |startsim_link|_. 
Here's " "an example:" msgstr "在笔记本中运行 |runsim_link|_,而不是 |startsim_link|_。下面是一个例子:" -#: ../../source/how-to-upgrade-to-flower-next.rst:319 +#: ../../source/how-to-upgrade-to-flower-next.rst:350 #, fuzzy msgid "" "Some official `Flower code examples `_ " @@ -8127,12 +8405,12 @@ msgstr "" " 已经更新到 Flower 1.0,它们可以作为使用 Flower 1.0 API 的参考。如果还有其他问题,请加入 Flower Slack " "`_ 并使用 \"#questions``\"。" -#: ../../source/how-to-upgrade-to-flower-next.rst:325 +#: ../../source/how-to-upgrade-to-flower-next.rst:357 #, fuzzy msgid "Important" msgstr "重要变更:" -#: ../../source/how-to-upgrade-to-flower-next.rst:328 +#: ../../source/how-to-upgrade-to-flower-next.rst:359 #, fuzzy msgid "" "As we continuously enhance Flower Next at a rapid pace, we'll be " @@ -8140,7 +8418,7 @@ msgid "" "with us!" msgstr "随着 Flower Next 的不断快速改进,我们将定期更新本指南。如有任何反馈,请随时与我们分享!" -#: ../../source/how-to-upgrade-to-flower-next.rst:334 +#: ../../source/how-to-upgrade-to-flower-next.rst:365 #, fuzzy msgid "Happy migrating! 🚀" msgstr "移民愉快!🚀" @@ -8157,7 +8435,7 @@ msgid "" " interfaces may change in future versions.**" msgstr "**注:本教程涵盖实验性功能。功能和界面可能会在未来版本中发生变化。" -#: ../../source/how-to-use-built-in-mods.rst:6 +#: ../../source/how-to-use-built-in-mods.rst:7 #, fuzzy msgid "" "In this tutorial, we will learn how to utilize built-in mods to augment " @@ -8168,12 +8446,12 @@ msgstr "" "在本教程中,我们将学习如何利用内置模块来增强 ``ClientApp`` 的行为。修改器(有时也称为修改器)允许我们在 ``ClientApp``" " 处理任务之前和之后执行操作。" -#: ../../source/how-to-use-built-in-mods.rst:9 +#: ../../source/how-to-use-built-in-mods.rst:12 #, fuzzy msgid "What are Mods?" msgstr "什么是 Mods?" -#: ../../source/how-to-use-built-in-mods.rst:11 +#: ../../source/how-to-use-built-in-mods.rst:14 #, fuzzy msgid "" "A Mod is a callable that wraps around a ``ClientApp``. 
It can manipulate " @@ -8183,95 +8461,95 @@ msgstr "" "Mod 是包裹在 ``ClientApp`` 周围的可调用程序。它可以操作或检查传入的 ``Message`` 和由此产生的传出的 " "``Message`` 。一个 ``Mod`` 的签名如下:" -#: ../../source/how-to-use-built-in-mods.rst:18 +#: ../../source/how-to-use-built-in-mods.rst:23 #, fuzzy msgid "A typical mod function might look something like this:" msgstr "一个典型的修改函数可能是这样的:" -#: ../../source/how-to-use-built-in-mods.rst:31 +#: ../../source/how-to-use-built-in-mods.rst:36 #, fuzzy msgid "Using Mods" msgstr "使用修改器" -#: ../../source/how-to-use-built-in-mods.rst:33 +#: ../../source/how-to-use-built-in-mods.rst:38 #, fuzzy msgid "To use mods in your ``ClientApp``, you can follow these steps:" msgstr "要在您的 ``ClientApp`` 中使用 mod,可以按照以下步骤操作:" -#: ../../source/how-to-use-built-in-mods.rst:36 +#: ../../source/how-to-use-built-in-mods.rst:41 #, fuzzy msgid "1. Import the required mods" msgstr "1. 导入所需修改" -#: ../../source/how-to-use-built-in-mods.rst:38 +#: ../../source/how-to-use-built-in-mods.rst:43 #, fuzzy msgid "First, import the built-in mod you intend to use:" msgstr "首先,导入您打算使用的内置模式:" -#: ../../source/how-to-use-built-in-mods.rst:46 +#: ../../source/how-to-use-built-in-mods.rst:51 #, fuzzy msgid "2. Define your client function" msgstr "2. 定义客户功能" -#: ../../source/how-to-use-built-in-mods.rst:48 +#: ../../source/how-to-use-built-in-mods.rst:53 #, fuzzy msgid "" "Define your client function (``client_fn``) that will be wrapped by the " "mod(s):" msgstr "定义将被 mod 封装的客户端函数(``client_fn``):" -#: ../../source/how-to-use-built-in-mods.rst:57 +#: ../../source/how-to-use-built-in-mods.rst:62 #, fuzzy msgid "3. Create the ``ClientApp`` with mods" msgstr "3. 用模块创建 ``ClientApp``" -#: ../../source/how-to-use-built-in-mods.rst:59 +#: ../../source/how-to-use-built-in-mods.rst:64 #, fuzzy msgid "" "Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " "argument. 
The order in which you provide the mods matters:" msgstr "创建您的 ``ClientApp`` 并将 mods 作为列表传递给 ``mods`` 参数。提供 mod 的顺序很重要:" -#: ../../source/how-to-use-built-in-mods.rst:72 +#: ../../source/how-to-use-built-in-mods.rst:78 #, fuzzy msgid "Order of execution" msgstr "停用" -#: ../../source/how-to-use-built-in-mods.rst:74 +#: ../../source/how-to-use-built-in-mods.rst:80 #, fuzzy msgid "" "When the ``ClientApp`` runs, the mods are executed in the order they are " "provided in the list:" msgstr "当运行 ``ClientApp`` 时,会按照列表中提供的顺序执行模块:" -#: ../../source/how-to-use-built-in-mods.rst:76 +#: ../../source/how-to-use-built-in-mods.rst:83 #, fuzzy msgid "``example_mod_1`` (outermost mod)" msgstr "``example_mod_1`` (最外层模块)" -#: ../../source/how-to-use-built-in-mods.rst:77 +#: ../../source/how-to-use-built-in-mods.rst:84 #, fuzzy msgid "``example_mod_2`` (next mod)" msgstr "示例模式 2(下一个模式)" -#: ../../source/how-to-use-built-in-mods.rst:78 +#: ../../source/how-to-use-built-in-mods.rst:85 #, fuzzy msgid "" "Message handler (core function that handles the incoming ``Message`` and " "returns the outgoing ``Message``)" msgstr "消息处理程序(处理传入的 \"消息 \"并返回传出的 \"消息 \"的核心函数)" -#: ../../source/how-to-use-built-in-mods.rst:79 +#: ../../source/how-to-use-built-in-mods.rst:87 #, fuzzy msgid "``example_mod_2`` (on the way back)" msgstr "``example_mod_2`` (返回途中)" -#: ../../source/how-to-use-built-in-mods.rst:80 +#: ../../source/how-to-use-built-in-mods.rst:88 #, fuzzy msgid "``example_mod_1`` (outermost mod on the way back)" msgstr "``example_mod_1`` (返回途中最外层的模式)" -#: ../../source/how-to-use-built-in-mods.rst:82 +#: ../../source/how-to-use-built-in-mods.rst:90 #, fuzzy msgid "" "Each mod has a chance to inspect and modify the incoming ``Message`` " @@ -8279,7 +8557,7 @@ msgid "" "``Message`` before returning it up the stack." 
msgstr "每个模块都有机会检查和修改传入的 \"信息\",然后再将其传递给下一个模块,同样,也有机会检查和修改传出的 \"信息\",然后再将其返回堆栈。" -#: ../../source/how-to-use-built-in-mods.rst:87 +#: ../../source/how-to-use-built-in-mods.rst:97 #, fuzzy msgid "" "By following this guide, you have learned how to effectively use mods to " @@ -8289,7 +8567,7 @@ msgstr "" "通过本指南,您已学会如何有效地使用 mod 来增强您的 ``ClientApp`` 的功能。请记住,mod " "的顺序至关重要,它会影响输入和输出的处理方式。" -#: ../../source/how-to-use-built-in-mods.rst:89 +#: ../../source/how-to-use-built-in-mods.rst:101 #, fuzzy msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" msgstr "使用 mods 构建更强大、更灵活的 \"客户端应用程序\"!" @@ -8299,7 +8577,7 @@ msgstr "使用 mods 构建更强大、更灵活的 \"客户端应用程序\"!" msgid "Use Differential Privacy" msgstr "差分隐私" -#: ../../source/how-to-use-differential-privacy.rst:3 +#: ../../source/how-to-use-differential-privacy.rst:4 #, fuzzy msgid "" "This guide explains how you can utilize differential privacy in the " @@ -8309,7 +8587,7 @@ msgstr "" "本指南解释了如何在 Flower 框架中使用差分隐私。如果您还不熟悉差分隐私,可以参考 :doc:`explanation-" "differential-privacy` 。" -#: ../../source/how-to-use-differential-privacy.rst:7 +#: ../../source/how-to-use-differential-privacy.rst:10 #, fuzzy msgid "" "Differential Privacy in Flower is in a preview phase. If you plan to use " @@ -8320,7 +8598,7 @@ msgstr "" "Flower " "中的差异隐私处于预览阶段。如果您计划在生产环境中使用这些敏感数据功能,请随时联系我们,讨论您的需求,并获得如何最好地使用这些功能的指导。" -#: ../../source/how-to-use-differential-privacy.rst:12 +#: ../../source/how-to-use-differential-privacy.rst:17 #, fuzzy msgid "" "This approach consists of two separate phases: clipping of the updates " @@ -8329,7 +8607,7 @@ msgid "" "the server side or the client side." 
msgstr "这种方法包括两个独立的阶段:对更新进行剪切和在聚合模型中添加噪声。在剪切阶段,Flower 框架可以决定是在服务器端还是在客户端执行剪切。" -#: ../../source/how-to-use-differential-privacy.rst:15 +#: ../../source/how-to-use-differential-privacy.rst:21 #, fuzzy msgid "" "**Server-side Clipping**: This approach has the advantage of the server " @@ -8341,7 +8619,7 @@ msgstr "" "** 服务器端剪切**: " "这种方法的优点是服务器可对所有客户端的更新执行统一的剪切,并减少剪切值的通信开销。不过,这种方法也有缺点,那就是需要为所有客户端执行剪切操作,从而增加了服务器的计算负荷。" -#: ../../source/how-to-use-differential-privacy.rst:16 +#: ../../source/how-to-use-differential-privacy.rst:26 #, fuzzy msgid "" "**Client-side Clipping**: This approach has the advantage of reducing the" @@ -8350,20 +8628,20 @@ msgid "" "control over the clipping process." msgstr "**客户端剪切**: 这种方法的优点是可以减少服务器的计算开销。不过,它也有缺乏集中控制的缺点,因为服务器对剪切过程的控制较少。" -#: ../../source/how-to-use-differential-privacy.rst:21 +#: ../../source/how-to-use-differential-privacy.rst:31 #, fuzzy msgid "Server-side Clipping" msgstr "服务器端逻辑" -#: ../../source/how-to-use-differential-privacy.rst:22 +#: ../../source/how-to-use-differential-privacy.rst:33 #, fuzzy msgid "" -"For central DP with server-side clipping, there are two :code:`Strategy` " -"classes that act as wrappers around the actual :code:`Strategy` instance " -"(for example, :code:`FedAvg`). The two wrapper classes are " -":code:`DifferentialPrivacyServerSideFixedClipping` and " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " -"adaptive clipping." +"For central DP with server-side clipping, there are two ``Strategy`` " +"classes that act as wrappers around the actual ``Strategy`` instance (for" +" example, ``FedAvg``). The two wrapper classes are " +"``DifferentialPrivacyServerSideFixedClipping`` and " +"``DifferentialPrivacyServerSideAdaptiveClipping`` for fixed and adaptive " +"clipping." 
msgstr "" "对于具有服务器端剪裁功能的中央 DP,有两个 :code:`Strategy` 类作为实际 :code:`Strategy` 实例(例如 " ":code:`FedAvg`)的包装器。这两个封装类分别是 " @@ -8375,35 +8653,34 @@ msgstr "" msgid "server side clipping" msgstr "服务器端逻辑" -#: ../../source/how-to-use-differential-privacy.rst:31 +#: ../../source/how-to-use-differential-privacy.rst:43 #, fuzzy msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use server-" -"side fixed clipping using the " -":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " -"same approach can be used with " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " +"The code sample below enables the ``FedAvg`` strategy to use server-side " +"fixed clipping using the ``DifferentialPrivacyServerSideFixedClipping`` " +"wrapper class. The same approach can be used with " +"``DifferentialPrivacyServerSideAdaptiveClipping`` by adjusting the " "corresponding input parameters." msgstr "" "下面的代码示例使用 :code:`DifferentialPrivacyServerSideFixedClipping` 封装类使 " ":code:`FedAvg` 策略使用服务器端固定剪辑。通过调整相应的输入参数,同样的方法也可用于 " ":code:`DifferentialPrivacyServerSideAdaptiveClipping`。" -#: ../../source/how-to-use-differential-privacy.rst:52 +#: ../../source/how-to-use-differential-privacy.rst:64 #, fuzzy msgid "Client-side Clipping" msgstr "客户端逻辑" -#: ../../source/how-to-use-differential-privacy.rst:53 +#: ../../source/how-to-use-differential-privacy.rst:66 #, fuzzy msgid "" "For central DP with client-side clipping, the server sends the clipping " "value to selected clients on each round. Clients can use existing Flower " -":code:`Mods` to perform the clipping. Two mods are available for fixed " -"and adaptive client-side clipping: :code:`fixedclipping_mod` and " -":code:`adaptiveclipping_mod` with corresponding server-side wrappers " -":code:`DifferentialPrivacyClientSideFixedClipping` and " -":code:`DifferentialPrivacyClientSideAdaptiveClipping`." +"``Mods`` to perform the clipping. 
Two mods are available for fixed and " +"adaptive client-side clipping: ``fixedclipping_mod`` and " +"``adaptiveclipping_mod`` with corresponding server-side wrappers " +"``DifferentialPrivacyClientSideFixedClipping`` and " +"``DifferentialPrivacyClientSideAdaptiveClipping``." msgstr "" "对于带有客户端剪裁功能的中央 DP,服务器会在每一轮向选定的客户端发送剪裁值。客户端可以使用现有的 Flower " ":code:`Mods`来执行剪裁。有两种模式可用于固定和自适应客户端剪辑::code:`fixedclipping_mod` 和 " @@ -8416,28 +8693,28 @@ msgstr "" msgid "client side clipping" msgstr "客户端逻辑" -#: ../../source/how-to-use-differential-privacy.rst:63 +#: ../../source/how-to-use-differential-privacy.rst:78 #, fuzzy msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use " -"differential privacy with client-side fixed clipping using both the " -":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " -"the client, :code:`fixedclipping_mod`:" +"The code sample below enables the ``FedAvg`` strategy to use differential" +" privacy with client-side fixed clipping using both the " +"``DifferentialPrivacyClientSideFixedClipping`` wrapper class and, on the " +"client, ``fixedclipping_mod``:" msgstr "" "下面的代码示例使用 :code:`DifferentialPrivacyClientSideFixedClipping` 封装类和客户端的 " ":code:`fixedclipping_mod` 使 :code:`FedAvg` 策略在客户端固定剪辑的情况下使用差分隐私:" -#: ../../source/how-to-use-differential-privacy.rst:80 +#: ../../source/how-to-use-differential-privacy.rst:97 #, fuzzy msgid "" -"In addition to the server-side strategy wrapper, the :code:`ClientApp` " -"needs to configure the matching :code:`fixedclipping_mod` to perform the " -"client-side clipping:" +"In addition to the server-side strategy wrapper, the ``ClientApp`` needs " +"to configure the matching ``fixedclipping_mod`` to perform the client-" +"side clipping:" msgstr "" "除了服务器端策略包装器外,:code:`ClientApp` 还需要配置匹配的 :code:`fixedclipping_mod` " "以执行客户端剪切:" -#: ../../source/how-to-use-differential-privacy.rst:97 +#: ../../source/how-to-use-differential-privacy.rst:115 #, fuzzy msgid "" "To utilize local 
differential privacy (DP) and add noise to the client " @@ -8453,12 +8730,12 @@ msgstr "" msgid "local DP mod" msgstr "本地 DP 模式" -#: ../../source/how-to-use-differential-privacy.rst:104 +#: ../../source/how-to-use-differential-privacy.rst:125 #, fuzzy -msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +msgid "Below is a code example that shows how to use ``LocalDpMod``:" msgstr "下面的代码示例展示了如何使用 :code:`LocalDpMod`:" -#: ../../source/how-to-use-differential-privacy.rst:122 +#: ../../source/how-to-use-differential-privacy.rst:140 #, fuzzy msgid "" "Please note that the order of mods, especially those that modify " @@ -8467,12 +8744,12 @@ msgid "" "parameters." msgstr "请注意,在使用多个修改器时,修改器(尤其是修改参数的修改器)的顺序非常重要。通常情况下,差分隐私 (DP) 修改器应最后对参数进行操作。" -#: ../../source/how-to-use-differential-privacy.rst:125 +#: ../../source/how-to-use-differential-privacy.rst:145 #, fuzzy msgid "Local Training using Privacy Engines" msgstr "使用隐私引擎进行本地培训" -#: ../../source/how-to-use-differential-privacy.rst:126 +#: ../../source/how-to-use-differential-privacy.rst:147 #, fuzzy msgid "" "For ensuring data instance-level privacy during local model training on " @@ -8494,106 +8771,110 @@ msgid "Use strategies" msgstr "使用策略" #: ../../source/how-to-use-strategies.rst:4 +#, fuzzy msgid "" "Flower allows full customization of the learning process through the " -":code:`Strategy` abstraction. A number of built-in strategies are " -"provided in the core framework." +"``Strategy`` abstraction. A number of built-in strategies are provided in" +" the core framework." 
msgstr "Flower 允许通过 :code:`Strategy` 抽象类对学习过程进行完全定制。核心框架中提供了许多内置策略。" -#: ../../source/how-to-use-strategies.rst:6 +#: ../../source/how-to-use-strategies.rst:7 msgid "" "There are three ways to customize the way Flower orchestrates the " "learning process on the server side:" msgstr "有三种方法可以自定义 Flower 在服务器端协调学习过程的方式:" -#: ../../source/how-to-use-strategies.rst:8 -msgid "Use an existing strategy, for example, :code:`FedAvg`" +#: ../../source/how-to-use-strategies.rst:10 +#, fuzzy +msgid "Use an existing strategy, for example, ``FedAvg``" msgstr "使用现有策略,例如 :code:`FedAvg`" -#: ../../source/how-to-use-strategies.rst:9 -#: ../../source/how-to-use-strategies.rst:40 +#: ../../source/how-to-use-strategies.rst:11 +#: ../../source/how-to-use-strategies.rst:43 msgid "Customize an existing strategy with callback functions" msgstr "使用回调函数定制现有策略" -#: ../../source/how-to-use-strategies.rst:10 -#: ../../source/how-to-use-strategies.rst:87 +#: ../../source/how-to-use-strategies.rst:12 +#: ../../source/how-to-use-strategies.rst:99 msgid "Implement a novel strategy" msgstr "实施新策略" -#: ../../source/how-to-use-strategies.rst:14 +#: ../../source/how-to-use-strategies.rst:15 msgid "Use an existing strategy" msgstr "使用现有策略" -#: ../../source/how-to-use-strategies.rst:16 +#: ../../source/how-to-use-strategies.rst:17 msgid "" "Flower comes with a number of popular federated learning strategies " "built-in. A built-in strategy can be instantiated as follows:" msgstr "Flower 内置了许多流行的联邦学习策略。内置策略的实例化方法如下:" -#: ../../source/how-to-use-strategies.rst:25 +#: ../../source/how-to-use-strategies.rst:27 +#, fuzzy msgid "" "This creates a strategy with all parameters left at their default values " -"and passes it to the :code:`start_server` function. It is usually " -"recommended to adjust a few parameters during instantiation:" +"and passes it to the ``start_server`` function. 
It is usually recommended" +" to adjust a few parameters during instantiation:" msgstr "这会创建一个所有参数都保持默认值的策略,并将其传递给 :code:`start_server` 函数。通常建议在实例化过程中调整一些参数:" -#: ../../source/how-to-use-strategies.rst:42 +#: ../../source/how-to-use-strategies.rst:45 msgid "" "Existing strategies provide several ways to customize their behaviour. " "Callback functions allow strategies to call user-provided code during " "execution." msgstr "现有的策略提供了多种自定义行为的方法。回调函数允许策略在执行过程中调用用户提供的代码。" -#: ../../source/how-to-use-strategies.rst:45 +#: ../../source/how-to-use-strategies.rst:49 msgid "Configuring client fit and client evaluate" msgstr "配置客户匹配和客户评估" -#: ../../source/how-to-use-strategies.rst:47 +#: ../../source/how-to-use-strategies.rst:51 +#, fuzzy msgid "" "The server can pass new configuration values to the client each round by " -"providing a function to :code:`on_fit_config_fn`. The provided function " -"will be called by the strategy and must return a dictionary of " -"configuration key values pairs that will be sent to the client. It must " -"return a dictionary of arbitrary configuration values :code:`client.fit`" -" and :code:`client.evaluate` functions during each round of federated " -"learning." +"providing a function to ``on_fit_config_fn``. The provided function will " +"be called by the strategy and must return a dictionary of configuration " +"key values pairs that will be sent to the client. It must return a " +"dictionary of arbitrary configuration values ``client.fit`` and " +"``client.evaluate`` functions during each round of federated learning." 
msgstr "" "服务器可以通过向 :code:`on_fit_config_fn` " "提供一个函数,在每一轮向客户端传递新的配置值。提供的函数将被策略调用,并且必须返回一个配置键值对的字典,该字典将被发送到客户端。在每一轮联邦学习期间,它必须返回一个任意配置值" " dictionary :code:`client.fit`和 :code:`client.evaluate`函数。" -#: ../../source/how-to-use-strategies.rst:75 +#: ../../source/how-to-use-strategies.rst:84 #, fuzzy msgid "" -"The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " +"The ``on_fit_config_fn`` can be used to pass arbitrary configuration " "values from server to client, and potentially change these values each " "round, for example, to adjust the learning rate. The client will receive " -"the dictionary returned by the :code:`on_fit_config_fn` in its own " -":code:`client.fit()` function." +"the dictionary returned by the ``on_fit_config_fn`` in its own " +"``client.fit()`` function." msgstr "" ":code:`on_fit_config_fn`可用于将任意配置值从服务器传递到客户端,并在每一轮改变这些值,例如,调整学习率。客户端将在自己的 " ":code:`client.fit()` 函数中接收 :code:`on_fit_config_fn` 返回的字典。" -#: ../../source/how-to-use-strategies.rst:78 +#: ../../source/how-to-use-strategies.rst:89 +#, fuzzy msgid "" -"Similar to :code:`on_fit_config_fn`, there is also " -":code:`on_evaluate_config_fn` to customize the configuration sent to " -":code:`client.evaluate()`" +"Similar to ``on_fit_config_fn``, there is also ``on_evaluate_config_fn`` " +"to customize the configuration sent to ``client.evaluate()``" msgstr "" "与 :code:`on_fit_config_fn` 类似,还有 :code:`on_evaluate_config_fn` 用于定制发送到 " ":code:`client.evaluate()` 的配置" -#: ../../source/how-to-use-strategies.rst:81 +#: ../../source/how-to-use-strategies.rst:93 msgid "Configuring server-side evaluation" msgstr "配置服务器端评估" -#: ../../source/how-to-use-strategies.rst:83 +#: ../../source/how-to-use-strategies.rst:95 +#, fuzzy msgid "" "Server-side evaluation can be enabled by passing an evaluation function " -"to :code:`evaluate_fn`." +"to ``evaluate_fn``." 
msgstr "服务器端评估可通过向 :code:`evaluate_fn` 传递评估函数来启用。" -#: ../../source/how-to-use-strategies.rst:89 +#: ../../source/how-to-use-strategies.rst:101 msgid "" "Writing a fully custom strategy is a bit more involved, but it provides " "the most flexibility. Read the `Implementing Strategies ` | :doc:`Android ` | :doc:`iOS `" -#: ../../source/index.rst:64 +#: ../../source/index.rst:70 msgid "We also made video tutorials for PyTorch:" msgstr "我们还为 PyTorch 制作了视频教程:" -#: ../../source/index.rst:69 +#: ../../source/index.rst:75 msgid "And TensorFlow:" msgstr "还有 TensorFlow:" -#: ../../source/index.rst:77 +#: ../../source/index.rst:83 msgid "" "Problem-oriented how-to guides show step-by-step how to achieve a " "specific goal." msgstr "以问题为导向的 \"如何做 \"指南逐步展示如何实现特定目标。" -#: ../../source/index.rst:110 +#: ../../source/index.rst:116 msgid "" "Understanding-oriented concept guides explain and discuss key topics and " "underlying ideas behind Flower and collaborative AI." msgstr "以理解为导向的概念指南解释并讨论了Flower和协作式人工智能背后的关键主题和基本思想。" -#: ../../source/index.rst:121 +#: ../../source/index.rst:128 msgid "References" msgstr "参考资料" -#: ../../source/index.rst:123 +#: ../../source/index.rst:130 msgid "Information-oriented API reference and other reference material." msgstr "以信息为导向的 API 参考资料和其他参考资料。" -#: ../../source/index.rst:132::1 +#: ../../source/index.rst:139::1 #, fuzzy msgid ":py:obj:`flwr `\\" msgstr ":py:obj:`flwr `\\" -#: ../../source/index.rst:132::1 flwr:1 of +#: ../../source/index.rst:139::1 flwr:1 of #, fuzzy msgid "Flower main package." msgstr "Flower 主包装。" -#: ../../source/index.rst:149 +#: ../../source/index.rst:155 msgid "Contributor docs" msgstr "贡献者文档" -#: ../../source/index.rst:151 +#: ../../source/index.rst:157 msgid "" "The Flower community welcomes contributions. The following docs are " "intended to help along the way." @@ -8875,6 +9156,10 @@ msgstr "参数解析器" msgid "Optional argument" msgstr "可选的改进措施" +#: ../../flwr install:1 +msgid "The source FAB file to install." 
+msgstr "" + #: ../../flwr log:1 msgid "Get logs from a Flower project run." msgstr "" @@ -8883,19 +9168,32 @@ msgstr "" msgid "Flag to stream or print logs from the Flower run" msgstr "" -#: ../../flwr log +#: ../../flwr log run #, fuzzy msgid "default" msgstr "工作流程" #: ../../flwr log:1 -msgid "``True``" +msgid "``True``" +msgstr "" + +#: ../../flwr log:1 +#, fuzzy +msgid "Required argument" +msgstr "构建文档" + +#: ../../flwr log:1 +#, fuzzy +msgid "The Flower run ID to query" +msgstr "加入 Flower 社区" + +#: ../../flwr log:1 +msgid "Path of the Flower project to run" msgstr "" #: ../../flwr log:1 -#, fuzzy -msgid "Required argument" -msgstr "构建文档" +msgid "Name of the federation to run the app on" +msgstr "" #: ../../flwr new:1 #, fuzzy @@ -8921,6 +9219,11 @@ msgstr "" msgid "The Flower username of the author" msgstr "" +#: ../../flwr new:1 +#, fuzzy +msgid "The name of the Flower App" +msgstr "基础镜像的存储库名称。" + #: ../../flwr run:1 #, fuzzy msgid "Run Flower App." @@ -8942,6 +9245,26 @@ msgid "" " the `pyproject.toml` in order to be properly overriden." msgstr "" +#: ../../flwr run:1 +msgid "" +"Use `--stream` with `flwr run` to display logs; logs are not streamed by " +"default." +msgstr "" + +#: ../../flwr run:1 +#, fuzzy +msgid "``False``" +msgstr "``FLWR_VERSION``" + +#: ../../flwr run:1 +#, fuzzy +msgid "Path of the Flower App to run." +msgstr "基础镜像的存储库名称。" + +#: ../../flwr run:1 +msgid "Name of the federation to run the app on." +msgstr "" + #: ../../source/ref-api-cli.rst:16 #, fuzzy msgid "flower-simulation" @@ -8961,17 +9284,16 @@ msgstr "Flower 服务器" msgid "flower-server-app" msgstr "flower-driver-api" -#: ../../source/ref-api-cli.rst:49 +#: ../../source/ref-api-cli.rst:50 msgid "" -"Note that since version :code:`1.11.0`, :code:`flower-server-app` no " -"longer supports passing a reference to a `ServerApp` attribute. Instead, " -"you need to pass the path to Flower app via the argument :code:`--app`. 
" -"This is the path to a directory containing a `pyproject.toml`. You can " -"create a valid Flower app by executing :code:`flwr new` and following the" -" prompt." +"Note that since version ``1.11.0``, ``flower-server-app`` no longer " +"supports passing a reference to a `ServerApp` attribute. Instead, you " +"need to pass the path to Flower app via the argument ``--app``. This is " +"the path to a directory containing a `pyproject.toml`. You can create a " +"valid Flower app by executing ``flwr new`` and following the prompt." msgstr "" -#: ../../source/ref-api-cli.rst:62 +#: ../../source/ref-api-cli.rst:64 #, fuzzy msgid "flower-superexec" msgstr "flower-superlink" @@ -24756,13 +25078,16 @@ msgstr "" "`PyTorch `_ 或 `TensorFlow " "`_。" -#: ../../source/ref-example-projects.rst:10 +#: ../../source/ref-example-projects.rst:9 #, fuzzy -msgid "" -"The following examples are available as standalone projects. Quickstart " -"TensorFlow/Keras ---------------------------" +msgid "The following examples are available as standalone projects." 
msgstr "以下示例可作为独立项目使用。" +#: ../../source/ref-example-projects.rst:12 +#, fuzzy +msgid "Quickstart TensorFlow/Keras" +msgstr "快速入门 TensorFlow" + #: ../../source/ref-example-projects.rst:14 msgid "" "The TensorFlow/Keras quickstart example shows CIFAR-10 image " @@ -24778,14 +25103,14 @@ msgstr "" "`TensorFlow快速入门 (代码) `_" -#: ../../source/ref-example-projects.rst:18 +#: ../../source/ref-example-projects.rst:19 #, fuzzy msgid ":doc:`Quickstart TensorFlow (Tutorial) `" msgstr "" "`TensorFlow快速入门 (教程) `_" -#: ../../source/ref-example-projects.rst:19 +#: ../../source/ref-example-projects.rst:20 msgid "" "`Quickstart TensorFlow (Blog Post) `_" @@ -24793,18 +25118,18 @@ msgstr "" "`TensorFlow快速入门 (博客) `_" -#: ../../source/ref-example-projects.rst:23 -#: ../../source/tutorial-quickstart-pytorch.rst:5 +#: ../../source/ref-example-projects.rst:24 +#: ../../source/tutorial-quickstart-pytorch.rst:4 msgid "Quickstart PyTorch" msgstr "PyTorch快速入门" -#: ../../source/ref-example-projects.rst:25 +#: ../../source/ref-example-projects.rst:26 msgid "" "The PyTorch quickstart example shows CIFAR-10 image classification with a" " simple Convolutional Neural Network:" msgstr "PyTorch 快速入门范例展示了使用简单卷积神经网络进行 CIFAR-10 图像分类的情况:" -#: ../../source/ref-example-projects.rst:28 +#: ../../source/ref-example-projects.rst:29 msgid "" "`Quickstart PyTorch (Code) " "`_" @@ -24812,24 +25137,24 @@ msgstr "" "`PyTorch快速入门 (代码) `_" -#: ../../source/ref-example-projects.rst:29 +#: ../../source/ref-example-projects.rst:31 #, fuzzy msgid ":doc:`Quickstart PyTorch (Tutorial) `" msgstr "" "`PyTorch快速入门 (教程) `_" -#: ../../source/ref-example-projects.rst:33 +#: ../../source/ref-example-projects.rst:34 msgid "PyTorch: From Centralized To Federated" msgstr "PyTorch: 从集中式到联邦式" -#: ../../source/ref-example-projects.rst:35 +#: ../../source/ref-example-projects.rst:36 msgid "" "This example shows how a regular PyTorch project can be federated using " "Flower:" msgstr "本例展示了如何使用 Flower 联邦化一个普通的 PyTorch 项目:" -#: 
../../source/ref-example-projects.rst:37 +#: ../../source/ref-example-projects.rst:38 msgid "" "`PyTorch: From Centralized To Federated (Code) " "`_" -#: ../../source/ref-example-projects.rst:38 +#: ../../source/ref-example-projects.rst:40 #, fuzzy msgid "" ":doc:`PyTorch: From Centralized To Federated (Tutorial) `_" -#: ../../source/ref-example-projects.rst:42 +#: ../../source/ref-example-projects.rst:44 msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" msgstr "树莓派和 Nvidia Jetson 上的联邦学习" -#: ../../source/ref-example-projects.rst:44 +#: ../../source/ref-example-projects.rst:46 msgid "" "This example shows how Flower can be used to build a federated learning " "system that run across Raspberry Pi and Nvidia Jetson:" msgstr "本示例展示了如何利用 Flower 建立一个跨 Raspberry Pi 和 Nvidia Jetson 运行的联邦学习系统:" -#: ../../source/ref-example-projects.rst:46 +#: ../../source/ref-example-projects.rst:49 msgid "" "`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " "`_" @@ -24865,7 +25190,7 @@ msgstr "" "Raspberry Pi 和 Nvidia Jetson 上的联邦学习(代码) " "`_" -#: ../../source/ref-example-projects.rst:47 +#: ../../source/ref-example-projects.rst:51 msgid "" "`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " "`_" @@ -24884,13 +25209,13 @@ msgstr "本页收集了有关 \"Flower 联邦学习 \"常见问题的答案。" msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" msgstr ":fa:`eye,mr-1` Flower 可以在 Juptyter Notebooks / Google Colab 上运行吗?" -#: ../../source/ref-faq.rst:8 +#: ../../source/ref-faq.rst:9 msgid "" "Yes, it can! Flower even comes with a few under-the-hood optimizations to" " make it work even better on Colab. 
Here's a quickstart example:" msgstr "是的,它可以!Flower 甚至还进行了一些底层优化,使其在 Colab 上运行得更好。下面是一个快速启动示例:" -#: ../../source/ref-faq.rst:10 +#: ../../source/ref-faq.rst:11 msgid "" "`Flower simulation PyTorch " "`_" -#: ../../source/ref-faq.rst:11 +#: ../../source/ref-faq.rst:12 msgid "" "`Flower simulation TensorFlow/Keras " "`_" @@ -24930,7 +25255,7 @@ msgstr "" msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" msgstr ":fa:`eye,mr-1` Flower 是否支持安卓设备上的联邦学习?" -#: ../../source/ref-faq.rst:19 +#: ../../source/ref-faq.rst:20 msgid "" "Yes, it does. Please take a look at our `blog post " "`_\" 或查看代码示例:" -#: ../../source/ref-faq.rst:21 +#: ../../source/ref-faq.rst:22 msgid "" "`Android Kotlin example `_" msgstr "`Android Kotlin 示例 `_" -#: ../../source/ref-faq.rst:22 +#: ../../source/ref-faq.rst:23 msgid "`Android Java example `_" msgstr "Android Java 示例 `_" @@ -24953,26 +25278,26 @@ msgstr "Android Java 示例 `_" msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" msgstr ":fa:`eye,mr-1` 我可以将联邦学习与区块链结合起来吗?" -#: ../../source/ref-faq.rst:26 +#: ../../source/ref-faq.rst:27 msgid "" "Yes, of course. A list of available examples using Flower within a " "blockchain environment is available here:" msgstr "当然可以。有关在区块链环境中使用 Flower 的可用示例列表,请点击此处:" -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-faq.rst:30 msgid "`FLock: A Decentralised AI Training Platform `_." msgstr "" -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-faq.rst:30 msgid "Contribute to on-chain training the model and earn rewards." msgstr "" -#: ../../source/ref-faq.rst:30 +#: ../../source/ref-faq.rst:31 #, fuzzy msgid "Local blockchain with federated learning simulation." msgstr "扩大联邦学习的规模" -#: ../../source/ref-faq.rst:31 +#: ../../source/ref-faq.rst:32 msgid "" "`Flower meets Nevermined GitHub Repository `_." @@ -24980,7 +25305,7 @@ msgstr "" "`Flower meets Nevermined GitHub Repository `_." 
-#: ../../source/ref-faq.rst:32 +#: ../../source/ref-faq.rst:33 msgid "" "`Flower meets Nevermined YouTube video " "`_." @@ -24988,7 +25313,7 @@ msgstr "" "`Flower meets Nevermined YouTube 视频 " "`_." -#: ../../source/ref-faq.rst:33 +#: ../../source/ref-faq.rst:34 #, fuzzy msgid "" "`Flower meets KOSMoS `_." -#: ../../source/ref-faq.rst:34 +#: ../../source/ref-faq.rst:35 msgid "" "`Flower meets Talan blog post `_ 。" -#: ../../source/ref-faq.rst:35 +#: ../../source/ref-faq.rst:36 msgid "" "`Flower meets Talan GitHub Repository " "`_ ." @@ -25239,17 +25564,17 @@ msgid "" "app using Flower." msgstr "阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 Android 应用程序。" -#: ../../source/tutorial-quickstart-android.rst:5 +#: ../../source/tutorial-quickstart-android.rst:4 msgid "Quickstart Android" msgstr "快速入门 Android" -#: ../../source/tutorial-quickstart-android.rst:10 +#: ../../source/tutorial-quickstart-android.rst:9 msgid "" "Let's build a federated learning system using TFLite and Flower on " "Android!" msgstr "让我们在 Android 上使用 TFLite 和 Flower 构建一个联邦学习系统!" 
-#: ../../source/tutorial-quickstart-android.rst:12 +#: ../../source/tutorial-quickstart-android.rst:11 msgid "" "Please refer to the `full code example " "`_ to learn " @@ -25258,11 +25583,11 @@ msgstr "" "请参阅`完整代码示例 " "`_了解更多信息。" -#: ../../source/tutorial-quickstart-fastai.rst:5 +#: ../../source/tutorial-quickstart-fastai.rst:4 msgid "Quickstart fastai" msgstr "快速入门 fastai" -#: ../../source/tutorial-quickstart-fastai.rst:7 +#: ../../source/tutorial-quickstart-fastai.rst:6 #, fuzzy msgid "" "In this federated learning tutorial we will learn how to train a " @@ -25273,24 +25598,24 @@ msgstr "" "首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" -#: ../../source/tutorial-quickstart-fastai.rst:12 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +#: ../../source/tutorial-quickstart-fastai.rst:10 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:11 msgid "Then, clone the code example directly from GitHub:" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:20 +#: ../../source/tutorial-quickstart-fastai.rst:18 msgid "" "This will create a new directory called `quickstart-fastai` containing " "the following files:" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:33 -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:33 +#: ../../source/tutorial-quickstart-fastai.rst:31 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:32 #, fuzzy msgid "Next, activate your environment, then run:" msgstr "并激活虚拟环境:" -#: ../../source/tutorial-quickstart-fastai.rst:43 +#: ../../source/tutorial-quickstart-fastai.rst:41 msgid "" "This example by default runs the Flower Simulation Engine, creating a " "federation of 10 nodes using `FedAvg `_ 中运行一切。" -#: ../../source/tutorial-quickstart-huggingface.rst:14 +#: ../../source/tutorial-quickstart-huggingface.rst:12 msgid "" "Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " "project. 
It will generate all the files needed to run, by default with " @@ -25363,75 +25688,75 @@ msgid "" "|iidpartitioner|_." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:20 -#: ../../source/tutorial-quickstart-mlx.rst:19 -#: ../../source/tutorial-quickstart-pytorch.rst:19 -#: ../../source/tutorial-quickstart-tensorflow.rst:20 +#: ../../source/tutorial-quickstart-huggingface.rst:17 +#: ../../source/tutorial-quickstart-mlx.rst:17 +#: ../../source/tutorial-quickstart-pytorch.rst:18 +#: ../../source/tutorial-quickstart-tensorflow.rst:18 #, fuzzy msgid "" "Now that we have a rough idea of what this example is about, let's get " "started. First, install Flower in your new environment:" msgstr "现在,我们已经有了一个大致的概念,让我们开始吧。首先,我们需要安装 Flower。运行:" -#: ../../source/tutorial-quickstart-huggingface.rst:28 +#: ../../source/tutorial-quickstart-huggingface.rst:25 msgid "" "Then, run the command below. You will be prompted to select one of the " "available templates (choose ``HuggingFace``), give a name to your " "project, and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:36 -#: ../../source/tutorial-quickstart-mlx.rst:35 -#: ../../source/tutorial-quickstart-pytorch.rst:35 -#: ../../source/tutorial-quickstart-tensorflow.rst:36 +#: ../../source/tutorial-quickstart-huggingface.rst:33 +#: ../../source/tutorial-quickstart-mlx.rst:32 +#: ../../source/tutorial-quickstart-pytorch.rst:34 +#: ../../source/tutorial-quickstart-tensorflow.rst:34 msgid "" "After running it you'll notice a new directory with your project name has" " been created. 
It should have the following structure:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:50 -#: ../../source/tutorial-quickstart-mlx.rst:49 -#: ../../source/tutorial-quickstart-pytorch.rst:49 -#: ../../source/tutorial-quickstart-tensorflow.rst:50 +#: ../../source/tutorial-quickstart-huggingface.rst:47 +#: ../../source/tutorial-quickstart-mlx.rst:46 +#: ../../source/tutorial-quickstart-pytorch.rst:48 +#: ../../source/tutorial-quickstart-tensorflow.rst:48 msgid "" "If you haven't yet installed the project and its dependencies, you can do" " so by:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:58 -#: ../../source/tutorial-quickstart-pytorch.rst:57 -#: ../../source/tutorial-quickstart-tensorflow.rst:58 +#: ../../source/tutorial-quickstart-huggingface.rst:54 +#: ../../source/tutorial-quickstart-pytorch.rst:55 +#: ../../source/tutorial-quickstart-tensorflow.rst:55 msgid "To run the project, do:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:106 +#: ../../source/tutorial-quickstart-huggingface.rst:102 msgid "You can also run the project with GPU as follows:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:113 +#: ../../source/tutorial-quickstart-huggingface.rst:109 msgid "" "This will use the default arguments where each ``ClientApp`` will use 2 " "CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:124 -#: ../../source/tutorial-quickstart-mlx.rst:114 -#: ../../source/tutorial-quickstart-pytorch.rst:113 +#: ../../source/tutorial-quickstart-huggingface.rst:120 +#: ../../source/tutorial-quickstart-mlx.rst:110 +#: ../../source/tutorial-quickstart-pytorch.rst:111 msgid "" "What follows is an explanation of each component in the project you just " "created: dataset partition, the model, defining the ``ClientApp`` and " "defining the ``ServerApp``." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:130 -#: ../../source/tutorial-quickstart-mlx.rst:120 -#: ../../source/tutorial-quickstart-pytorch.rst:119 -#: ../../source/tutorial-quickstart-tensorflow.rst:116 +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/tutorial-quickstart-tensorflow.rst:112 #, fuzzy msgid "The Data" msgstr "加载数据" -#: ../../source/tutorial-quickstart-huggingface.rst:132 +#: ../../source/tutorial-quickstart-huggingface.rst:126 msgid "" "This tutorial uses |flowerdatasets|_ to easily download and partition the" " `IMDB `_ dataset. In " @@ -25444,15 +25769,15 @@ msgid "" "their data partition." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:178 -#: ../../source/tutorial-quickstart-mlx.rst:164 -#: ../../source/tutorial-quickstart-pytorch.rst:157 -#: ../../source/tutorial-quickstart-tensorflow.rst:145 +#: ../../source/tutorial-quickstart-huggingface.rst:171 +#: ../../source/tutorial-quickstart-mlx.rst:155 +#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/tutorial-quickstart-tensorflow.rst:139 #, fuzzy msgid "The Model" msgstr "训练模型" -#: ../../source/tutorial-quickstart-huggingface.rst:180 +#: ../../source/tutorial-quickstart-huggingface.rst:173 #, fuzzy msgid "" "We will leverage 🤗 Hugging Face to federate the training of language " @@ -25465,13 +25790,13 @@ msgstr "" "我们将利用Hugging Face技术,使用 Flower 在多个客户端上联邦训练语言模型。更具体地说,我们将对预先训练好的 " "Transformer 模型(distilBERT)进行微调,以便在 IMDB 评分数据集上进行序列分类。最终目标是检测电影评分是正面还是负面。" -#: ../../source/tutorial-quickstart-huggingface.rst:193 +#: ../../source/tutorial-quickstart-huggingface.rst:185 msgid "" "Note that here, ``model_name`` is a string that will be loaded from the " "``Context`` in the ClientApp and ServerApp." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:196 +#: ../../source/tutorial-quickstart-huggingface.rst:188 msgid "" "In addition to loading the pretrained model weights and architecture, we " "also include two utility functions to perform both training (i.e. " @@ -25484,15 +25809,15 @@ msgid "" "perform local training or evaluation:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:239 -#: ../../source/tutorial-quickstart-mlx.rst:210 -#: ../../source/tutorial-quickstart-pytorch.rst:234 -#: ../../source/tutorial-quickstart-tensorflow.rst:176 +#: ../../source/tutorial-quickstart-huggingface.rst:228 +#: ../../source/tutorial-quickstart-mlx.rst:199 +#: ../../source/tutorial-quickstart-pytorch.rst:224 +#: ../../source/tutorial-quickstart-tensorflow.rst:168 #, fuzzy msgid "The ClientApp" msgstr "客户端" -#: ../../source/tutorial-quickstart-huggingface.rst:241 +#: ../../source/tutorial-quickstart-huggingface.rst:230 msgid "" "The main changes we have to make to use 🤗 Hugging Face with Flower will " "be found in the ``get_weights()`` and ``set_weights()`` functions. Under " @@ -25505,8 +25830,8 @@ msgid "" "them to an existing PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:254 -#: ../../source/tutorial-quickstart-pytorch.rst:245 +#: ../../source/tutorial-quickstart-huggingface.rst:241 +#: ../../source/tutorial-quickstart-pytorch.rst:234 msgid "" "The specific implementation of ``get_weights()`` and ``set_weights()`` " "depends on the type of models you use. The ones shown below work for a " @@ -25514,8 +25839,8 @@ msgid "" "have more exotic model architectures." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:269 -#: ../../source/tutorial-quickstart-pytorch.rst:261 +#: ../../source/tutorial-quickstart-huggingface.rst:257 +#: ../../source/tutorial-quickstart-pytorch.rst:250 msgid "" "The rest of the functionality is directly inspired by the centralized " "case. 
The ``fit()`` method in the client trains the model using the local" @@ -25523,7 +25848,7 @@ msgid "" "model received on a held-out validation set that the client might have:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:296 +#: ../../source/tutorial-quickstart-huggingface.rst:283 msgid "" "Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " "defined above by means of a ``client_fn()`` callback. Note that the " @@ -25534,15 +25859,15 @@ msgid "" "additional hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:330 -#: ../../source/tutorial-quickstart-mlx.rst:376 -#: ../../source/tutorial-quickstart-pytorch.rst:321 -#: ../../source/tutorial-quickstart-tensorflow.rst:245 +#: ../../source/tutorial-quickstart-huggingface.rst:316 +#: ../../source/tutorial-quickstart-mlx.rst:361 +#: ../../source/tutorial-quickstart-pytorch.rst:307 +#: ../../source/tutorial-quickstart-tensorflow.rst:232 #, fuzzy msgid "The ServerApp" msgstr "服务器" -#: ../../source/tutorial-quickstart-huggingface.rst:332 +#: ../../source/tutorial-quickstart-huggingface.rst:318 msgid "" "To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" " identical signature to that of ``client_fn()`` but the return type is " @@ -25553,13 +25878,13 @@ msgid "" "value defined in the ``pyproject.toml``." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:371 +#: ../../source/tutorial-quickstart-huggingface.rst:356 msgid "" "Congratulations! You've successfully built and run your first federated " "learning system for an LLM." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:376 +#: ../../source/tutorial-quickstart-huggingface.rst:361 msgid "" "Check the source code of the extended version of this tutorial in " "|quickstart_hf_link|_ in the Flower GitHub repository. For a " @@ -25573,11 +25898,11 @@ msgid "" "using Flower to train a neural network on MNIST." 
msgstr "阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 iOS 应用程序,并在 MNIST 上训练神经网络。" -#: ../../source/tutorial-quickstart-ios.rst:5 +#: ../../source/tutorial-quickstart-ios.rst:4 msgid "Quickstart iOS" msgstr "快速入门 iOS" -#: ../../source/tutorial-quickstart-ios.rst:10 +#: ../../source/tutorial-quickstart-ios.rst:9 msgid "" "In this tutorial we will learn how to train a Neural Network on MNIST " "using Flower and CoreML on iOS devices." @@ -25595,13 +25920,13 @@ msgstr "" "`_ 中运行一切。对于在 iOS 中实现 " "Flower 客户端,建议使用 Xcode 作为我们的集成开发环境。" -#: ../../source/tutorial-quickstart-ios.rst:15 +#: ../../source/tutorial-quickstart-ios.rst:17 msgid "" "Our example consists of one Python *server* and two iPhone *clients* that" " all have the same model." msgstr "我们的示例包括一个 Python *服务器*和两个 iPhone *客户端*,它们都具有相同的模型。" -#: ../../source/tutorial-quickstart-ios.rst:17 +#: ../../source/tutorial-quickstart-ios.rst:20 msgid "" "*Clients* are responsible for generating individual weight updates for " "the model based on their local datasets. These updates are then sent to " @@ -25610,24 +25935,24 @@ msgid "" "each *client*. A complete cycle of weight updates is called a *round*." msgstr "*客户端*负责根据其本地数据集为模型生成独立的模型参数。然后,这些参数更新会被发送到*服务器*,由*服务器*汇总后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的参数更新周期称为一*轮*。" -#: ../../source/tutorial-quickstart-ios.rst:21 +#: ../../source/tutorial-quickstart-ios.rst:26 msgid "" "Now that we have a rough idea of what is going on, let's get started to " "setup our Flower server environment. We first need to install Flower. 
You" " can do this by using pip:" msgstr "现在我们已经有了一个大致的概念,让我们开始设置 Flower 服务器环境吧。首先,我们需要安装 Flower。你可以使用 pip 来安装:" -#: ../../source/tutorial-quickstart-ios.rst:27 +#: ../../source/tutorial-quickstart-ios.rst:33 msgid "Or Poetry:" msgstr "或者Poetry:" -#: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-xgboost.rst:55 +#: ../../source/tutorial-quickstart-ios.rst:40 +#: ../../source/tutorial-quickstart-scikitlearn.rst:43 +#: ../../source/tutorial-quickstart-xgboost.rst:65 msgid "Flower Client" msgstr "Flower 客户端" -#: ../../source/tutorial-quickstart-ios.rst:36 +#: ../../source/tutorial-quickstart-ios.rst:42 msgid "" "Now that we have all our dependencies installed, let's run a simple " "distributed training using CoreML as our local training pipeline and " @@ -25639,13 +25964,14 @@ msgstr "" "作为数据集,运行一个简单的分布式训练。为了简单起见,我们将使用 CoreML 的完整 Flower 客户端,该客户端已在 Swift SDK " "中实现并存储。客户端实现如下:" -#: ../../source/tutorial-quickstart-ios.rst:72 +#: ../../source/tutorial-quickstart-ios.rst:80 +#, fuzzy msgid "" -"Let's create a new application project in Xcode and add :code:`flwr` as a" -" dependency in your project. For our application, we will store the logic" -" of our app in :code:`FLiOSModel.swift` and the UI elements in " -":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" -" in this quickstart. Please refer to the `full code example " +"Let's create a new application project in Xcode and add ``flwr`` as a " +"dependency in your project. For our application, we will store the logic " +"of our app in ``FLiOSModel.swift`` and the UI elements in " +"``ContentView.swift``. We will focus more on ``FLiOSModel.swift`` in this" +" quickstart. Please refer to the `full code example " "`_ to learn more " "about the app." 
msgstr "" @@ -25654,26 +25980,28 @@ msgstr "" "元素。在本快速入门中,我们将更多地关注 :code:`FLiOSModel.swift`。请参阅 `完整代码示例 " "`_ 以了解更多有关应用程序的信息。" -#: ../../source/tutorial-quickstart-ios.rst:75 -msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +#: ../../source/tutorial-quickstart-ios.rst:86 +#, fuzzy +msgid "Import Flower and CoreML related packages in ``FLiOSModel.swift``:" msgstr "在 :code:`FLiOSModel.swift` 中导入 Flower 和 CoreML 相关软件包:" -#: ../../source/tutorial-quickstart-ios.rst:83 +#: ../../source/tutorial-quickstart-ios.rst:94 +#, fuzzy msgid "" "Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " "will be bundled inside the application during deployment to your iOS " "device. We need to pass the url to access mlmodel and run CoreML machine " "learning processes, it can be retrieved by calling the function " -":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " -"into :code:`MLBatchProvider` object. The preprocessing is done inside " -":code:`DataLoader.swift`." +"``Bundle.main.url``. For the MNIST dataset, we need to preprocess it into" +" ``MLBatchProvider`` object. The preprocessing is done inside " +"``DataLoader.swift``." msgstr "" "然后通过拖放将 mlmodel 添加到项目中,在部署到 iOS 设备时,mlmodel 将被捆绑到应用程序中。我们需要传递 url 以访问 " "mlmodel 并运行 CoreML 机器学习进程,可通过调用函数 :code:`Bundle.main.url` 获取。对于 MNIST " "数据集,我们需要将其预处理为 :code:`MLBatchProvider` 对象。预处理在 :code:`DataLoader.swift` " "中完成。" -#: ../../source/tutorial-quickstart-ios.rst:99 +#: ../../source/tutorial-quickstart-ios.rst:112 #, fuzzy msgid "" "Since CoreML does not allow the model parameters to be seen before " @@ -25681,66 +26009,68 @@ msgid "" " can only be done by specifying the layer name, we need to know this " "information beforehand, through looking at the model specification, which" " are written as proto files. The implementation can be seen in " -":code:`MLModelInspect`." +"``MLModelInspect``." 
msgstr "" "由于 CoreML 不允许在训练前查看模型参数,而在训练过程中或训练后访问模型参数只能通过指定层名来完成,因此我们需要事先通过查看模型规范(写成 " "proto 文件)来了解这些信息。具体实现可参见 :code:`MLModelInspect`。" -#: ../../source/tutorial-quickstart-ios.rst:102 +#: ../../source/tutorial-quickstart-ios.rst:118 #, fuzzy msgid "" "After we have all of the necessary information, let's create our Flower " "client." msgstr "获得所有必要信息后,让我们创建 Flower 客户端。" -#: ../../source/tutorial-quickstart-ios.rst:117 +#: ../../source/tutorial-quickstart-ios.rst:133 +#, fuzzy msgid "" "Then start the Flower gRPC client and start communicating to the server " -"by passing our Flower client to the function :code:`startFlwrGRPC`." +"by passing our Flower client to the function ``startFlwrGRPC``." msgstr "然后启动 Flower gRPC 客户端,并通过将 Flower 客户端传递给函数 :code:`startFlwrGRPC` 来开始与服务器通信。" -#: ../../source/tutorial-quickstart-ios.rst:124 +#: ../../source/tutorial-quickstart-ios.rst:141 +#, fuzzy msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " -"The attribute :code:`hostname` and :code:`port` tells the client which " -"server to connect to. This can be done by entering the hostname and port " -"in the application before clicking the start button to start the " -"federated learning process." +"That's it for the client. We only have to implement ``Client`` or call " +"the provided ``MLFlwrClient`` and call ``startFlwrGRPC()``. The attribute" +" ``hostname`` and ``port`` tells the client which server to connect to. " +"This can be done by entering the hostname and port in the application " +"before clicking the start button to start the federated learning process." 
msgstr "" "这就是客户端。我们只需实现 :code:`Client` 或调用提供的 :code:`MLFlwrClient` 并调用 " ":code:`startFlwrGRPC()`。属性 :code:`hostname` 和 :code:`port` " "会告诉客户端要连接到哪个服务器。这可以通过在应用程序中输入主机名和端口来实现,然后再点击开始按钮启动联邦学习进程。" -#: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-scikitlearn.rst:167 -#: ../../source/tutorial-quickstart-xgboost.rst:341 +#: ../../source/tutorial-quickstart-ios.rst:148 +#: ../../source/tutorial-quickstart-scikitlearn.rst:179 +#: ../../source/tutorial-quickstart-xgboost.rst:358 msgid "Flower Server" msgstr "Flower 服务器" -#: ../../source/tutorial-quickstart-ios.rst:131 +#: ../../source/tutorial-quickstart-ios.rst:150 +#, fuzzy msgid "" "For simple workloads we can start a Flower server and leave all the " "configuration possibilities at their default values. In a file named " -":code:`server.py`, import Flower and start the server:" +"``server.py``, import Flower and start the server:" msgstr "" "对于简单的工作负载,我们可以启动 Flower 服务器,并将所有配置选项保留为默认值。在名为 :code:`server.py` 的文件中,导入 " "Flower 并启动服务器:" -#: ../../source/tutorial-quickstart-ios.rst:142 -#: ../../source/tutorial-quickstart-scikitlearn.rst:230 +#: ../../source/tutorial-quickstart-ios.rst:161 +#: ../../source/tutorial-quickstart-scikitlearn.rst:254 msgid "Train the model, federated!" msgstr "联邦训练模型!" -#: ../../source/tutorial-quickstart-ios.rst:144 -#: ../../source/tutorial-quickstart-xgboost.rst:567 +#: ../../source/tutorial-quickstart-ios.rst:163 +#: ../../source/tutorial-quickstart-xgboost.rst:590 msgid "" "With both client and server ready, we can now run everything and see " "federated learning in action. FL systems usually have a server and " "multiple clients. We therefore have to start the server first:" msgstr "客户端和服务器都已准备就绪,我们现在可以运行一切,看看联邦学习的实际效果。FL 系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" -#: ../../source/tutorial-quickstart-ios.rst:152 +#: ../../source/tutorial-quickstart-ios.rst:171 msgid "" "Once the server is running we can start the clients in different " "terminals. 
Build and run the client through your Xcode, one through Xcode" @@ -25754,12 +26084,13 @@ msgstr "" "`_。" -#: ../../source/tutorial-quickstart-ios.rst:156 +#: ../../source/tutorial-quickstart-ios.rst:177 +#, fuzzy msgid "" "Congratulations! You've successfully built and run your first federated " "learning system in your ios device. The full `source code " "`_ for this " -"example can be found in :code:`examples/ios`." +"example can be found in ``examples/ios``." msgstr "" "恭喜您! 您已经成功地在 ios 设备中构建并运行了第一个联邦学习系统。本示例的`完整源代码 " "`_ 可在 " @@ -25771,11 +26102,11 @@ msgid "" "with Jax to train a linear regression model on a scikit-learn dataset." msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 Jax 在 scikit-learn 数据集上训练线性回归模型。" -#: ../../source/tutorial-quickstart-jax.rst:5 +#: ../../source/tutorial-quickstart-jax.rst:4 msgid "Quickstart JAX" msgstr "快速入门 JAX" -#: ../../source/tutorial-quickstart-jax.rst:10 +#: ../../source/tutorial-quickstart-jax.rst:9 msgid "" "This tutorial will show you how to use Flower to build a federated " "version of an existing JAX workload. We are using JAX to train a linear " @@ -25795,138 +26126,146 @@ msgstr "" "`_" " 教程构建集中式训练方法。然后,我们在集中式训练代码的基础上以联邦方式运行训练。" -#: ../../source/tutorial-quickstart-jax.rst:16 +#: ../../source/tutorial-quickstart-jax.rst:20 +#, fuzzy msgid "" "Before we start building our JAX example, we need install the packages " -":code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:" +"``jax``, ``jaxlib``, ``scikit-learn``, and ``flwr``:" msgstr "" "在开始构建 JAX 示例之前,我们需要安装软件包 :code:`jax`、:code:`jaxlib`、:code:`scikit-learn` " "和 :code:`flwr`:" -#: ../../source/tutorial-quickstart-jax.rst:24 +#: ../../source/tutorial-quickstart-jax.rst:28 msgid "Linear Regression with JAX" msgstr "使用 JAX 进行线性回归" -#: ../../source/tutorial-quickstart-jax.rst:26 +#: ../../source/tutorial-quickstart-jax.rst:30 +#, fuzzy msgid "" "We begin with a brief description of the centralized training code based " -"on a :code:`Linear Regression` model. 
If you want a more in-depth " -"explanation of what's going on then have a look at the official `JAX " -"documentation `_." +"on a ``Linear Regression`` model. If you want a more in-depth explanation" +" of what's going on then have a look at the official `JAX documentation " +"`_." msgstr "" "首先,我们将简要介绍基于 :code:`Linear Regression` 模型的集中式训练代码。如果您想获得更深入的解释,请参阅官方的 " "`JAX 文档 `_。" -#: ../../source/tutorial-quickstart-jax.rst:29 +#: ../../source/tutorial-quickstart-jax.rst:34 +#, fuzzy msgid "" -"Let's create a new file called :code:`jax_training.py` with all the " +"Let's create a new file called ``jax_training.py`` with all the " "components required for a traditional (centralized) linear regression " -"training. First, the JAX packages :code:`jax` and :code:`jaxlib` need to " -"be imported. In addition, we need to import :code:`sklearn` since we use " -":code:`make_regression` for the dataset and :code:`train_test_split` to " -"split the dataset into a training and test set. You can see that we do " -"not yet import the :code:`flwr` package for federated learning. This will" -" be done later." +"training. First, the JAX packages ``jax`` and ``jaxlib`` need to be " +"imported. In addition, we need to import ``sklearn`` since we use " +"``make_regression`` for the dataset and ``train_test_split`` to split the" +" dataset into a training and test set. You can see that we do not yet " +"import the ``flwr`` package for federated learning. This will be done " +"later." msgstr "" "让我们创建一个名为 :code:`jax_training.py` 的新文件,其中包含传统(集中式)线性回归训练所需的所有组件。首先,需要导入 " "JAX 包 :code:`jax` 和 :code:`jaxlib`。此外,我们还需要导入 :code:`sklearn`,因为我们使用 " ":code:`make_regression` 创建数据集,并使用 :code:`train_test_split` " "将数据集拆分成训练集和测试集。您可以看到,我们还没有导入用于联邦学习的 :code:`flwr` 软件包,这将在稍后完成。" -#: ../../source/tutorial-quickstart-jax.rst:43 -msgid "" -"The :code:`load_data()` function loads the mentioned training and test " -"sets." 
+#: ../../source/tutorial-quickstart-jax.rst:51 +#, fuzzy +msgid "The ``load_data()`` function loads the mentioned training and test sets." msgstr ":code:`load_data()` 函数会加载上述训练集和测试集。" -#: ../../source/tutorial-quickstart-jax.rst:53 +#: ../../source/tutorial-quickstart-jax.rst:63 +#, fuzzy msgid "" -"The model architecture (a very simple :code:`Linear Regression` model) is" -" defined in :code:`load_model()`." +"The model architecture (a very simple ``Linear Regression`` model) is " +"defined in ``load_model()``." msgstr "模型结构(一个非常简单的 :code:`Linear Regression` 线性回归模型)在 :code:`load_model()` 中定义。" -#: ../../source/tutorial-quickstart-jax.rst:65 +#: ../../source/tutorial-quickstart-jax.rst:73 +#, fuzzy msgid "" -"We now need to define the training (function :code:`train()`), which " -"loops over the training set and measures the loss (function " -":code:`loss_fn()`) for each batch of training examples. The loss function" -" is separate since JAX takes derivatives with a :code:`grad()` function " -"(defined in the :code:`main()` function and called in :code:`train()`)." +"We now need to define the training (function ``train()``), which loops " +"over the training set and measures the loss (function ``loss_fn()``) for " +"each batch of training examples. The loss function is separate since JAX " +"takes derivatives with a ``grad()`` function (defined in the ``main()`` " +"function and called in ``train()``)." msgstr "" "现在,我们需要定义训练函数( :code:`train()`)。它循环遍历训练集,并计算每批训练数据的损失值(函数 " ":code:`loss_fn()`)。由于 JAX 使用 :code:`grad()` 函数提取导数(在 :code:`main()` " "函数中定义,并在 :code:`train()` 中调用),因此损失函数是独立的。" -#: ../../source/tutorial-quickstart-jax.rst:83 +#: ../../source/tutorial-quickstart-jax.rst:95 +#, fuzzy msgid "" -"The evaluation of the model is defined in the function " -":code:`evaluation()`. The function takes all test examples and measures " -"the loss of the linear regression model." +"The evaluation of the model is defined in the function ``evaluation()``. 
" +"The function takes all test examples and measures the loss of the linear " +"regression model." msgstr "模型的评估在函数 :code:`evaluation()` 中定义。该函数获取所有测试数据,并计算线性回归模型的损失值。" -#: ../../source/tutorial-quickstart-jax.rst:94 +#: ../../source/tutorial-quickstart-jax.rst:107 +#, fuzzy msgid "" "Having defined the data loading, model architecture, training, and " "evaluation we can put everything together and train our model using JAX. " -"As already mentioned, the :code:`jax.grad()` function is defined in " -":code:`main()` and passed to :code:`train()`." +"As already mentioned, the ``jax.grad()`` function is defined in " +"``main()`` and passed to ``train()``." msgstr "" "在定义了数据加载、模型架构、训练和评估之后,我们就可以把这些放在一起,使用 JAX " "训练我们的模型了。如前所述,:code:`jax.grad()` 函数在 :code:`main()` 中定义,并传递给 " ":code:`train()`。" -#: ../../source/tutorial-quickstart-jax.rst:111 +#: ../../source/tutorial-quickstart-jax.rst:126 msgid "You can now run your (centralized) JAX linear regression workload:" msgstr "现在您可以运行(集中式)JAX 线性回归工作了:" -#: ../../source/tutorial-quickstart-jax.rst:117 +#: ../../source/tutorial-quickstart-jax.rst:132 msgid "" "So far this should all look fairly familiar if you've used JAX before. " "Let's take the next step and use what we've built to create a simple " "federated learning system consisting of one server and two clients." msgstr "到目前为止,如果你以前使用过 JAX,就会对这一切感到很熟悉。下一步,让我们利用已构建的代码创建一个简单的联邦学习系统(一个服务器和两个客户端)。" -#: ../../source/tutorial-quickstart-jax.rst:121 +#: ../../source/tutorial-quickstart-jax.rst:137 msgid "JAX meets Flower" msgstr "JAX 结合 Flower" -#: ../../source/tutorial-quickstart-jax.rst:123 +#: ../../source/tutorial-quickstart-jax.rst:139 +#, fuzzy msgid "" "The concept of federating an existing workload is always the same and " "easy to understand. We have to start a *server* and then use the code in " -":code:`jax_training.py` for the *clients* that are connected to the " -"*server*. The *server* sends model parameters to the clients. 
The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server*, which averages all received " -"parameter updates. This describes one round of the federated learning " -"process, and we repeat this for multiple rounds." +"``jax_training.py`` for the *clients* that are connected to the *server*." +" The *server* sends model parameters to the clients. The *clients* run " +"the training and update the parameters. The updated parameters are sent " +"back to the *server*, which averages all received parameter updates. This" +" describes one round of the federated learning process, and we repeat " +"this for multiple rounds." msgstr "" "把现有工作联邦化的概念始终是相同的,也很容易理解。我们要启动一个*服务器*,然后对连接到*服务器*的*客户端*运行 " ":code:`jax_training.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后服务器对所有收到的参数进行平均聚合。以上的描述构成了一轮联邦学习,我们将重复进行多轮学习。" -#: ../../source/tutorial-quickstart-jax.rst:145 +#: ../../source/tutorial-quickstart-jax.rst:167 +#, fuzzy msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined JAX training in :code:`jax_training.py`. Our" -" *client* needs to import :code:`flwr`, but also :code:`jax` and " -":code:`jaxlib` to update the parameters on our JAX model:" +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined JAX training in ``jax_training.py``. Our " +"*client* needs to import ``flwr``, but also ``jax`` and ``jaxlib`` to " +"update the parameters on our JAX model:" msgstr "" "最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 " ":code:`jax_training.py` 中定义的 JAX 训练为基础。我们的 *client* 需要导入 " ":code:`flwr`,还需要导入 :code:`jax` 和 :code:`jaxlib` 以更新 JAX 模型的参数:" -#: ../../source/tutorial-quickstart-jax.rst:160 +#: ../../source/tutorial-quickstart-jax.rst:182 +#, fuzzy msgid "" "Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. 
" -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`FlowerClient`. :code:`NumPyClient` is slightly " -"easier to implement than :code:`Client` if you use a framework with good " -"NumPy interoperability (like JAX) because it avoids some of the " -"boilerplate that would otherwise be necessary. :code:`FlowerClient` needs" -" to implement four methods, two methods for getting/setting model " -"parameters, one method for training the model, and one method for testing" -" the model:" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``FlowerClient``. ``NumPyClient`` is slightly easier to implement" +" than ``Client`` if you use a framework with good NumPy interoperability " +"(like JAX) because it avoids some of the boilerplate that would otherwise" +" be necessary. ``FlowerClient`` needs to implement four methods, two " +"methods for getting/setting model parameters, one method for training the" +" model, and one method for testing the model:" msgstr "" "实现一个 Flower *client*基本上意味着去实现一个 :code:`flwr.client.Client` 或 " ":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " @@ -25935,58 +26274,61 @@ msgstr "" ":code:`Client`更容易实现,因为它避免了一些不必要的操作。:code:`FlowerClient` " "需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid ":code:`set_parameters (optional)`" +#: ../../source/tutorial-quickstart-jax.rst:194 +#, fuzzy +msgid "``set_parameters (optional)``" msgstr ":code:`set_parameters (可选)`" -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid "transform parameters to NumPy :code:`ndarray`'s" +#: ../../source/tutorial-quickstart-jax.rst:193 +#, fuzzy +msgid "transform parameters to NumPy ``ndarray``'s" msgstr "将参数转换为 NumPy :code:`ndarray`格式" -#: ../../source/tutorial-quickstart-jax.rst:174 +#: ../../source/tutorial-quickstart-jax.rst:203 msgid "get the updated local model 
parameters and return them to the server" msgstr "获取更新后的本地模型参数并返回服务器" -#: ../../source/tutorial-quickstart-jax.rst:178 +#: ../../source/tutorial-quickstart-jax.rst:208 msgid "return the local loss to the server" msgstr "向服务器返回本地损失值" -#: ../../source/tutorial-quickstart-jax.rst:180 +#: ../../source/tutorial-quickstart-jax.rst:210 +#, fuzzy msgid "" "The challenging part is to transform the JAX model parameters from " -":code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with" -" `NumPyClient`." +"``DeviceArray`` to ``NumPy ndarray`` to make them compatible with " +"`NumPyClient`." msgstr "" "具有挑战性的部分是将 JAX 模型参数从 :code:`DeviceArray` 转换为 :code:`NumPy ndarray`,使其与 " "`NumPyClient` 兼容。" -#: ../../source/tutorial-quickstart-jax.rst:182 +#: ../../source/tutorial-quickstart-jax.rst:213 +#, fuzzy msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`evaluate()` previously " -"defined in :code:`jax_training.py`. So what we really do here is we tell " -"Flower through our :code:`NumPyClient` subclass which of our already " -"defined functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``evaluate()`` previously defined in " +"``jax_training.py``. So what we really do here is we tell Flower through " +"our ``NumPyClient`` subclass which of our already defined functions to " +"call for training and evaluation. We included type annotations to give " +"you a better understanding of the data types that get passed around." 
msgstr "" "这两个 :code:`NumPyClient` 方法 :code:`fit` 和 :code:`evaluate` 使用了之前在 " ":code:`jax_training.py` 中定义的函数 :code:`train()` 和 " ":code:`evaluate()`。因此,我们在这里要做的就是通过 :code:`NumPyClient` 子类告知 Flower " "在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让您更好地理解传递的数据类型。" -#: ../../source/tutorial-quickstart-jax.rst:251 +#: ../../source/tutorial-quickstart-jax.rst:286 msgid "Having defined the federation process, we can run it." msgstr "定义了联邦进程后,我们就可以运行它了。" -#: ../../source/tutorial-quickstart-jax.rst:280 +#: ../../source/tutorial-quickstart-jax.rst:315 msgid "" "in each window (make sure that the server is still running before you do " "so) and see your JAX project run federated learning across two clients. " "Congratulations!" msgstr "确保服务器仍在运行,然后在每个客户端窗口就能看到你的 JAX 项目在两个客户端上运行联邦学习了。祝贺!" -#: ../../source/tutorial-quickstart-jax.rst:285 +#: ../../source/tutorial-quickstart-jax.rst:321 msgid "" "The source code of this example was improved over time and can be found " "here: `Quickstart JAX `_。我们的示例有些过于简单,因为两个客户端都加载了相同的数据集。" -#: ../../source/tutorial-quickstart-jax.rst:288 +#: ../../source/tutorial-quickstart-jax.rst:325 msgid "" "You're now prepared to explore this topic further. How about using a more" " sophisticated model or using a different dataset? How about adding more " "clients?" msgstr "现在,您已准备好进行更深一步探索了。例如使用更复杂的模型或使用不同的数据集会如何?增加更多客户端会如何?" -#: ../../source/tutorial-quickstart-mlx.rst:5 +#: ../../source/tutorial-quickstart-mlx.rst:4 #, fuzzy msgid "Quickstart MLX" msgstr "快速入门 JAX" -#: ../../source/tutorial-quickstart-mlx.rst:7 +#: ../../source/tutorial-quickstart-mlx.rst:6 #, fuzzy msgid "" "In this federated learning tutorial we will learn how to train simple MLP" @@ -26020,7 +26362,7 @@ msgstr "" "首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" -#: ../../source/tutorial-quickstart-mlx.rst:12 +#: ../../source/tutorial-quickstart-mlx.rst:10 msgid "" "Let's use `flwr new` to create a complete Flower+MLX project. 
It will " "generate all the files needed to run, by default with the Simulation " @@ -26032,24 +26374,24 @@ msgid "" "api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner>`_." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:27 +#: ../../source/tutorial-quickstart-mlx.rst:25 msgid "" "Then, run the command below. You will be prompted to select of the " "available templates (choose ``MLX``), give a name to your project, and " "type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:57 +#: ../../source/tutorial-quickstart-mlx.rst:53 msgid "To run the project do:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:106 +#: ../../source/tutorial-quickstart-mlx.rst:102 msgid "" "You can also override the parameters defined in " "``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:122 +#: ../../source/tutorial-quickstart-mlx.rst:116 msgid "" "We will use `Flower Datasets `_ to " "easily download and partition the `MNIST` dataset. In this example you'll" @@ -26060,20 +26402,20 @@ msgid "" "api/flwr_datasets.partitioner.html>`_ available in Flower Datasets:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:166 +#: ../../source/tutorial-quickstart-mlx.rst:157 msgid "" "We define the model as in the `centralized MLX example " "`_, it's a " "simple MLP:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:190 +#: ../../source/tutorial-quickstart-mlx.rst:180 msgid "" "We also define some utility functions to test our model and to iterate " "over batches." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:212 +#: ../../source/tutorial-quickstart-mlx.rst:201 msgid "" "The main changes we have to make to use `MLX` with `Flower` will be found" " in the ``get_params()`` and ``set_params()`` functions. Indeed, MLX " @@ -26082,17 +26424,17 @@ msgid "" "messages to work)." 
msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:218 +#: ../../source/tutorial-quickstart-mlx.rst:206 msgid "The way MLX stores its parameters is as follows:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:231 +#: ../../source/tutorial-quickstart-mlx.rst:219 msgid "" "Therefore, to get our list of ``np.array`` objects, we need to extract " "each array and convert them into a NumPy array:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:240 +#: ../../source/tutorial-quickstart-mlx.rst:228 msgid "" "For the ``set_params()`` function, we perform the reverse operation. We " "receive a list of NumPy arrays and want to convert them into MLX " @@ -26100,24 +26442,24 @@ msgid "" "them to the `weight` and `bias` keys of each layer dict:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:255 +#: ../../source/tutorial-quickstart-mlx.rst:243 msgid "" "The rest of the functionality is directly inspired by the centralized " "case. The ``fit()`` method in the client trains the model using the local" " dataset:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:272 +#: ../../source/tutorial-quickstart-mlx.rst:259 msgid "" "Here, after updating the parameters, we perform the training as in the " "centralized case, and return the new parameters." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:275 +#: ../../source/tutorial-quickstart-mlx.rst:262 msgid "And for the ``evaluate()`` method of the client:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:285 +#: ../../source/tutorial-quickstart-mlx.rst:272 msgid "" "We also begin by updating the parameters with the ones sent by the " "server, and then we compute the loss and accuracy using the functions " @@ -26125,12 +26467,12 @@ msgid "" "the `MLP` model as well as other components such as the optimizer." 
msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:290 +#: ../../source/tutorial-quickstart-mlx.rst:277 #, fuzzy msgid "Putting everything together we have:" msgstr "把所有东西放在一起" -#: ../../source/tutorial-quickstart-mlx.rst:344 +#: ../../source/tutorial-quickstart-mlx.rst:331 msgid "" "Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " "defined above by means of a ``client_fn()`` callback. Note that " @@ -26141,7 +26483,7 @@ msgid "" "method." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:378 +#: ../../source/tutorial-quickstart-mlx.rst:363 msgid "" "To construct a ``ServerApp``, we define a ``server_fn()`` callback with " "an identical signature to that of ``client_fn()``, but the return type is" @@ -26152,15 +26494,15 @@ msgid "" "``FedAvg`` strategy." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:402 -#: ../../source/tutorial-quickstart-pytorch.rst:360 -#: ../../source/tutorial-quickstart-tensorflow.rst:279 +#: ../../source/tutorial-quickstart-mlx.rst:386 +#: ../../source/tutorial-quickstart-pytorch.rst:344 +#: ../../source/tutorial-quickstart-tensorflow.rst:266 msgid "" "Congratulations! You've successfully built and run your first federated " "learning system." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:407 +#: ../../source/tutorial-quickstart-mlx.rst:390 #, fuzzy msgid "" "Check the `source code `_ " @@ -26199,7 +26541,7 @@ msgid "" "with PyTorch to train a CNN model on MNIST." msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch 在 MNIST 上训练 CNN 模型。" -#: ../../source/tutorial-quickstart-pytorch.rst:7 +#: ../../source/tutorial-quickstart-pytorch.rst:6 #, fuzzy msgid "" "In this federated learning tutorial we will learn how to train a " @@ -26210,7 +26552,7 @@ msgstr "" "首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" -#: ../../source/tutorial-quickstart-pytorch.rst:12 +#: ../../source/tutorial-quickstart-pytorch.rst:11 msgid "" "Let's use `flwr new` to create a complete Flower+PyTorch project. 
It will" " generate all the files needed to run, by default with the Flower " @@ -26222,14 +26564,14 @@ msgid "" "api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner>`_." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:27 +#: ../../source/tutorial-quickstart-pytorch.rst:26 msgid "" "Then, run the command below. You will be prompted to select one of the " "available templates (choose ``PyTorch``), give a name to your project, " "and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:121 +#: ../../source/tutorial-quickstart-pytorch.rst:117 msgid "" "This tutorial uses `Flower Datasets `_ " "to easily download and partition the `CIFAR-10` dataset. In this example " @@ -26243,13 +26585,13 @@ msgid "" " that correspond to their data partition." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:159 +#: ../../source/tutorial-quickstart-pytorch.rst:152 msgid "" "We defined a simple Convolutional Neural Network (CNN), but feel free to " "replace it with a more sophisticated model if you'd like:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:184 +#: ../../source/tutorial-quickstart-pytorch.rst:177 msgid "" "In addition to defining the model architecture, we also include two " "utility functions to perform both training (i.e. ``train()``) and " @@ -26262,7 +26604,7 @@ msgid "" "training or evaluation:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:236 +#: ../../source/tutorial-quickstart-pytorch.rst:226 msgid "" "The main changes we have to make to use `PyTorch` with `Flower` will be " "found in the ``get_weights()`` and ``set_weights()`` functions. In " @@ -26272,7 +26614,7 @@ msgid "" "PyTorch model. Doing this in fairly easy in PyTorch." 
msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:294 +#: ../../source/tutorial-quickstart-pytorch.rst:282 msgid "" "Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " "defined above by means of a ``client_fn()`` callback. Note that the " @@ -26283,7 +26625,7 @@ msgid "" "additioinal hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:323 +#: ../../source/tutorial-quickstart-pytorch.rst:309 msgid "" "To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" " identical signature to that of ``client_fn()`` but the return type is " @@ -26297,7 +26639,7 @@ msgid "" "``pyproject.toml``." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:365 +#: ../../source/tutorial-quickstart-pytorch.rst:348 #, fuzzy msgid "" "Check the `source code `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" -#: ../../source/tutorial-quickstart-pytorch.rst:372 -#: ../../source/tutorial-quickstart-tensorflow.rst:295 +#: ../../source/tutorial-quickstart-pytorch.rst:354 +#: ../../source/tutorial-quickstart-tensorflow.rst:278 #, fuzzy msgid "Video tutorial" msgstr "教程" -#: ../../source/tutorial-quickstart-pytorch.rst:376 +#: ../../source/tutorial-quickstart-pytorch.rst:358 msgid "" "The video shown below shows how to setup a PyTorch + Flower project using" " our previously recommended APIs. 
A new video tutorial will be released " "that shows the new APIs (as the content above does)" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:4 msgid "Quickstart PyTorch Lightning" msgstr "快速入门 PyTorch Lightning" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:7 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:6 #, fuzzy msgid "" "In this federated learning tutorial we will learn how to train an " @@ -26335,13 +26677,13 @@ msgstr "" "首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:20 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:19 msgid "" "This will create a new directory called `quickstart-pytorch-lightning` " "containing the following files:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:43 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:42 msgid "" "By default, Flower Simulation Engine will be started and it will create a" " federation of 4 nodes using `FedAvg `_ 中运行所有内容。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +#: ../../source/tutorial-quickstart-scikitlearn.rst:15 msgid "" "Our example consists of one *server* and two *clients* all having the " "same model." msgstr "我们的例子包括一个*服务器*和两个*客户端*,它们都有相同的模型。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:16 +#: ../../source/tutorial-quickstart-scikitlearn.rst:17 msgid "" "*Clients* are responsible for generating individual model parameter " "updates for the model based on their local datasets. These updates are " @@ -26411,80 +26754,86 @@ msgid "" "called a *round*." msgstr "*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。然后,这些参数更新将被发送到*服务器*,由*服务器*汇总后生成一个更新的全局模型。最后,*服务器*将这一改进版模型发回给每个*客户端*。一个完整的参数更新周期称为一*轮*。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +#: ../../source/tutorial-quickstart-scikitlearn.rst:23 msgid "" "Now that we have a rough idea of what is going on, let's get started. 
We " "first need to install Flower. You can do this by running:" msgstr "现在,我们已经有了一个大致的概念,让我们开始吧。首先,我们需要安装 Flower。运行:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:26 +#: ../../source/tutorial-quickstart-scikitlearn.rst:30 #, fuzzy msgid "Since we want to use scikit-learn, let's go ahead and install it:" msgstr "既然我们要使用 scikt-learn,那就继续安装吧:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:32 +#: ../../source/tutorial-quickstart-scikitlearn.rst:36 msgid "Or simply install all dependencies using Poetry:" msgstr "或者直接使用 Poetry 安装所有依赖项:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:42 +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 +#, fuzzy msgid "" "Now that we have all our dependencies installed, let's run a simple " "distributed training with two clients and one server. However, before " "setting up the client and server, we will define all functionalities that" -" we need for our federated learning setup within :code:`utils.py`. The " -":code:`utils.py` contains different functions defining all the machine " +" we need for our federated learning setup within ``utils.py``. 
The " +"``utils.py`` contains different functions defining all the machine " "learning basics:" msgstr "" "现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。不过,在设置客户端和服务器之前,我们将在 " ":code:`utils.py` 中定义联邦学习设置所需的所有功能。:code:`utils.py`包含定义所有机器学习基础知识的不同函数:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:45 -msgid ":code:`get_model_parameters()`" +#: ../../source/tutorial-quickstart-scikitlearn.rst:51 +#, fuzzy +msgid "``get_model_parameters()``" msgstr ":code:`get_model_parameters()`" -#: ../../source/tutorial-quickstart-scikitlearn.rst:46 -msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +#, fuzzy +msgid "Returns the parameters of a ``sklearn`` LogisticRegression model" msgstr "返回 :code:`sklearn` LogisticRegression 模型的参数" -#: ../../source/tutorial-quickstart-scikitlearn.rst:47 -msgid ":code:`set_model_params()`" +#: ../../source/tutorial-quickstart-scikitlearn.rst:53 +#, fuzzy +msgid "``set_model_params()``" msgstr ":code:`set_model_params()`" -#: ../../source/tutorial-quickstart-scikitlearn.rst:48 +#: ../../source/tutorial-quickstart-scikitlearn.rst:54 #, fuzzy -msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" +msgid "Sets the parameters of a ``sklearn`` LogisticRegression model" msgstr "设置:code:`sklean`的LogisticRegression模型的参数" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid ":code:`set_initial_params()`" +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +#, fuzzy +msgid "``set_initial_params()``" msgstr ":code:`set_initial_params()`" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 msgid "Initializes the model parameters that the Flower server will ask for" msgstr "初始化 Flower 服务器将要求的模型参数" -#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +#: ../../source/tutorial-quickstart-scikitlearn.rst:58 +#, fuzzy msgid "" -"Please check out :code:`utils.py` `here " +"Please check 
out ``utils.py`` `here " "`_ for more details. The pre-defined functions are used in" -" the :code:`client.py` and imported. The :code:`client.py` also requires " -"to import several packages such as Flower and scikit-learn:" +" the ``client.py`` and imported. The ``client.py`` also requires to " +"import several packages such as Flower and scikit-learn:" msgstr "" "更多详情请查看 :code:`utils.py`` 这里 " "`_。在 :code:`client.py` 中使用并导入了预定义函数。:code:`client.py` " "还需要导入几个软件包,如 Flower 和 scikit-learn:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:67 +#: ../../source/tutorial-quickstart-scikitlearn.rst:75 #, fuzzy msgid "" "Prior to local training, we need to load the MNIST dataset, a popular " "image classification dataset of handwritten digits for machine learning, " "and partition the dataset for FL. This can be conveniently achieved using" " `Flower Datasets `_. The " -":code:`FederatedDataset.load_partition()` method loads the partitioned " -"training set for each partition ID defined in the :code:`--partition-id` " +"``FederatedDataset.load_partition()`` method loads the partitioned " +"training set for each partition ID defined in the ``--partition-id`` " "argument." msgstr "" "在本地训练之前,我们需要加载 MNIST 数据集(一个用于机器学习的流行手写数字图像分类数据集),并对数据集进行 FL 分区。使用 " @@ -26492,93 +26841,98 @@ msgstr "" "`_\"可以方便地实现这一点。:code:`FederatedDataset.load_partition()`" " 方法为 :code:`--partition-id` 参数中定义的每个分区 ID 加载分区训练集。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:95 +#: ../../source/tutorial-quickstart-scikitlearn.rst:106 +#, fuzzy msgid "" "Next, the logistic regression model is defined and initialized with " -":code:`utils.set_initial_params()`." +"``utils.set_initial_params()``." msgstr "接下来,使用 :code:`utils.set_initial_params()` 对逻辑回归模型进行定义和初始化。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:107 +#: ../../source/tutorial-quickstart-scikitlearn.rst:119 +#, fuzzy msgid "" "The Flower server interacts with clients through an interface called " -":code:`Client`. 
When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to fit the logistic regression we defined earlier)." +"``Client``. When the server selects a particular client for training, it " +"sends training instructions over the network. The client receives those " +"instructions and calls one of the ``Client`` methods to run your code " +"(i.e., to fit the logistic regression we defined earlier)." msgstr "" "Flower 服务器通过一个名为 :code:`Client` " "的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 :code:`Client`" " 方法之一来运行您的代码(即拟合我们之前定义的逻辑回归)。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:113 +#: ../../source/tutorial-quickstart-scikitlearn.rst:124 +#, fuzzy msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses scikit-learn. Implementing :code:`NumPyClient` usually " -"means defining the following methods (:code:`set_parameters` is optional " -"though):" +"Flower provides a convenience class called ``NumPyClient`` which makes it" +" easier to implement the ``Client`` interface when your workload uses " +"scikit-learn. 
Implementing ``NumPyClient`` usually means defining the " +"following methods (``set_parameters`` is optional though):" msgstr "" "Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当你的工作负载使用 scikit-learn " "时,它可以让你更容易地实现 :code:`Client` 接口。实现 :code:`NumPyClient` " "通常意味着定义以下方法(:code:`set_parameters` 是可选的):" -#: ../../source/tutorial-quickstart-scikitlearn.rst:119 +#: ../../source/tutorial-quickstart-scikitlearn.rst:130 msgid "return the model weight as a list of NumPy ndarrays" msgstr "以 NumPy ndarrays 列表形式返回模型参数" -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 -msgid ":code:`set_parameters` (optional)" +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 +#, fuzzy +msgid "``set_parameters`` (optional)" msgstr ":code:`set_parameters` (可选)" -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 msgid "" "update the local model weights with the parameters received from the " "server" msgstr "用从服务器接收到的参数更新本地模型参数" -#: ../../source/tutorial-quickstart-scikitlearn.rst:122 -msgid "is directly imported with :code:`utils.set_model_params()`" +#: ../../source/tutorial-quickstart-scikitlearn.rst:133 +#, fuzzy +msgid "is directly imported with ``utils.set_model_params()``" msgstr "直接导入 :code:`utils.set_model_params()`" -#: ../../source/tutorial-quickstart-scikitlearn.rst:124 +#: ../../source/tutorial-quickstart-scikitlearn.rst:135 msgid "set the local model weights" msgstr "设置本地模型参数" -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 msgid "train the local model" msgstr "训练本地模型" -#: ../../source/tutorial-quickstart-scikitlearn.rst:126 +#: ../../source/tutorial-quickstart-scikitlearn.rst:137 #, fuzzy msgid "return the updated local model weights" msgstr "接收更新的本地模型参数" -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 +#: ../../source/tutorial-quickstart-scikitlearn.rst:139 msgid "test the local model" msgstr "测试本地模型" -#: 
../../source/tutorial-quickstart-scikitlearn.rst:130 +#: ../../source/tutorial-quickstart-scikitlearn.rst:141 msgid "The methods can be implemented in the following way:" msgstr "这些方法可以通过以下方式实现:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:153 +#: ../../source/tutorial-quickstart-scikitlearn.rst:163 +#, fuzzy msgid "" -"We can now create an instance of our class :code:`MnistClient` and add " -"one line to actually run this client:" +"We can now create an instance of our class ``MnistClient`` and add one " +"line to actually run this client:" msgstr "现在我们可以创建一个 :code:`MnistClient` 类的实例,并添加一行来实际运行该客户端:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:160 +#: ../../source/tutorial-quickstart-scikitlearn.rst:170 #, fuzzy msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" -" the client which server to connect to. In our case we can run the server" -" and the client on the same machine, therefore we use " -":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we pass to the client." +"That's it for the client. We only have to implement ``Client`` or " +"``NumPyClient`` and call ``fl.client.start_client()``. If you implement a" +" client of type ``NumPyClient`` you'll need to first call its " +"``to_client()`` method. The string ``\"0.0.0.0:8080\"`` tells the client " +"which server to connect to. In our case we can run the server and the " +"client on the same machine, therefore we use ``\"0.0.0.0:8080\"``. 
If we " +"run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" pass to the client." msgstr "" "这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient` 并调用 " ":code:`fl.client.start_client()` 或 " @@ -26587,7 +26941,7 @@ msgstr "" ":code:`\"0.0.0.0:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是传递给客户端的" " :code:`server_address`。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:169 +#: ../../source/tutorial-quickstart-scikitlearn.rst:181 msgid "" "The following Flower server is a little bit more advanced and returns an " "evaluation function for the server-side evaluation. First, we import " @@ -26596,32 +26950,34 @@ msgstr "" "下面的 Flower 服务器更先进一些,会返回一个用于服务器端评估的评估函数。首先,我们再次导入所有需要的库,如 Flower 和 scikit-" "learn。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:172 -msgid ":code:`server.py`, import Flower and start the server:" +#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +#, fuzzy +msgid "``server.py``, import Flower and start the server:" msgstr ":code:`server.py`, 导入 Flower 并启动服务器:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +#: ../../source/tutorial-quickstart-scikitlearn.rst:198 #, fuzzy msgid "" -"The number of federated learning rounds is set in :code:`fit_round()` and" -" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " -"function is called after each federated learning round and gives you " -"information about loss and accuracy. Note that we also make use of Flower" -" Datasets here to load the test split of the MNIST dataset for server-" -"side evaluation." +"The number of federated learning rounds is set in ``fit_round()`` and the" +" evaluation is defined in ``get_evaluate_fn()``. The evaluation function " +"is called after each federated learning round and gives you information " +"about loss and accuracy. 
Note that we also make use of Flower Datasets " +"here to load the test split of the MNIST dataset for server-side " +"evaluation." msgstr "" "联邦学习轮数在 :code:`fit_round()` 中设置,评估在 :code:`get_evaluate_fn()` " "中定义。每轮联邦学习后都会调用评估函数,并提供有关损失值和准确率的信息。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:213 +#: ../../source/tutorial-quickstart-scikitlearn.rst:228 +#, fuzzy msgid "" -"The :code:`main` contains the server-side parameter initialization " -":code:`utils.set_initial_params()` as well as the aggregation strategy " -":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " +"The ``main`` contains the server-side parameter initialization " +"``utils.set_initial_params()`` as well as the aggregation strategy " +"``fl.server.strategy:FedAvg()``. The strategy is the default one, " "federated averaging (or FedAvg), with two clients and evaluation after " "each federated learning round. The server can be started with the command" -" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." +" ``fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))``." msgstr "" ":code:`main`包含服务器端参数初始化:code:`utils.set_initial_params()`以及聚合策略 " ":code:`fl.server.strategy:FedAvg()`。该策略是默认的联邦平均(或 " @@ -26629,7 +26985,7 @@ msgstr "" ":code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " "strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))` 命令启动服务器。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:232 +#: ../../source/tutorial-quickstart-scikitlearn.rst:256 msgid "" "With both client and server ready, we can now run everything and see " "federated learning in action. 
Federated learning systems usually have a " @@ -26637,33 +26993,34 @@ msgid "" "first:" msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:239 -#: ../../source/tutorial-quickstart-xgboost.rst:575 +#: ../../source/tutorial-quickstart-scikitlearn.rst:264 +#: ../../source/tutorial-quickstart-xgboost.rst:598 msgid "" "Once the server is running we can start the clients in different " "terminals. Open a new terminal and start the first client:" msgstr "服务器运行后,我们就可以在不同终端启动客户端了。打开一个新终端,启动第一个客户端:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:246 -#: ../../source/tutorial-quickstart-xgboost.rst:582 +#: ../../source/tutorial-quickstart-scikitlearn.rst:271 +#: ../../source/tutorial-quickstart-xgboost.rst:605 msgid "Open another terminal and start the second client:" msgstr "打开另一台终端,启动第二个客户端:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:252 -#: ../../source/tutorial-quickstart-xgboost.rst:588 +#: ../../source/tutorial-quickstart-scikitlearn.rst:277 +#: ../../source/tutorial-quickstart-xgboost.rst:611 msgid "" "Each client will have its own dataset. You should now see how the " "training does in the very first terminal (the one that started the " "server):" msgstr "每个客户端都有自己的数据集。现在你应该看到第一个终端(启动服务器的终端)的训练效果了:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:286 +#: ../../source/tutorial-quickstart-scikitlearn.rst:311 +#, fuzzy msgid "" "Congratulations! You've successfully built and run your first federated " "learning system. The full `source code " "`_ for this example can be found in :code:`examples/sklearn-logreg-" -"mnist`." +"mnist>`_ for this example can be found in ``examples/sklearn-logreg-" +"mnist``." msgstr "" "恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " "`_ 中运行一切。" -#: ../../source/tutorial-quickstart-tensorflow.rst:13 +#: ../../source/tutorial-quickstart-tensorflow.rst:11 msgid "" "Let's use `flwr new` to create a complete Flower+TensorFlow project. 
It " "will generate all the files needed to run, by default with the Flower " @@ -26703,14 +27060,14 @@ msgid "" "api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner>`_." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:28 +#: ../../source/tutorial-quickstart-tensorflow.rst:26 msgid "" "Then, run the command below. You will be prompted to select one of the " "available templates (choose ``TensorFlow``), give a name to your project," " and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:118 +#: ../../source/tutorial-quickstart-tensorflow.rst:114 msgid "" "This tutorial uses `Flower Datasets `_ " "to easily download and partition the `CIFAR-10` dataset. In this example " @@ -26724,14 +27081,14 @@ msgid "" " correspond to their data partition." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:147 +#: ../../source/tutorial-quickstart-tensorflow.rst:141 msgid "" "Next, we need a model. We defined a simple Convolutional Neural Network " "(CNN), but feel free to replace it with a more sophisticated model if " "you'd like:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:178 +#: ../../source/tutorial-quickstart-tensorflow.rst:170 msgid "" "With `TensorFlow`, we can use the built-in ``get_weights()`` and " "``set_weights()`` functions, which simplifies the implementation with " @@ -26742,7 +27099,7 @@ msgid "" "set that the client might have:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:212 +#: ../../source/tutorial-quickstart-tensorflow.rst:203 msgid "" "Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " "defined above by means of a ``client_fn()`` callback. Note that the " @@ -26754,7 +27111,7 @@ msgid "" "``pyproject.toml`` and access them here." 
msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:247 +#: ../../source/tutorial-quickstart-tensorflow.rst:234 msgid "" "To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" " identical signature to that of ``client_fn()`` but the return type is " @@ -26766,7 +27123,7 @@ msgid "" "the global model to federate." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:284 +#: ../../source/tutorial-quickstart-tensorflow.rst:270 #, fuzzy msgid "" "Check the source code of the extended version of this tutorial in " @@ -26775,7 +27132,7 @@ msgstr "" "此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" -#: ../../source/tutorial-quickstart-tensorflow.rst:299 +#: ../../source/tutorial-quickstart-tensorflow.rst:282 msgid "" "The video shown below shows how to setup a TensorFlow + Flower project " "using our previously recommended APIs. A new video tutorial will be " @@ -26788,15 +27145,15 @@ msgid "" "with XGBoost to train classification models on trees." 
msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 XGBoost 上训练分类模型。" -#: ../../source/tutorial-quickstart-xgboost.rst:5 +#: ../../source/tutorial-quickstart-xgboost.rst:4 msgid "Quickstart XGBoost" msgstr "XGBoost快速入门" -#: ../../source/tutorial-quickstart-xgboost.rst:14 +#: ../../source/tutorial-quickstart-xgboost.rst:13 msgid "Federated XGBoost" msgstr "联邦化 XGBoost" -#: ../../source/tutorial-quickstart-xgboost.rst:16 +#: ../../source/tutorial-quickstart-xgboost.rst:15 msgid "" "EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " "implementation of gradient-boosted decision tree (**GBDT**), that " @@ -26809,18 +27166,18 @@ msgstr "" "Boosting(**XGBoost**)是梯度提升决策树(**GBDT**)的一种稳健而高效的实现方法,能最大限度地提高提升树方法的计算边界。它主要用于提高机器学习模型的性能和计算速度。在" " XGBoost 中,决策树是并发构建的,与 GBDT 采用的顺序方法不同。" -#: ../../source/tutorial-quickstart-xgboost.rst:20 +#: ../../source/tutorial-quickstart-xgboost.rst:21 msgid "" "Often, for tabular data on medium-sized datasets with fewer than 10k " "training examples, XGBoost surpasses the results of deep learning " "techniques." msgstr "对于训练示例少于 10k 的中型数据集上的表格数据,XGBoost 的结果往往超过深度学习技术。" -#: ../../source/tutorial-quickstart-xgboost.rst:23 +#: ../../source/tutorial-quickstart-xgboost.rst:25 msgid "Why federated XGBoost?" msgstr "为什么选择联邦 XGBoost?" -#: ../../source/tutorial-quickstart-xgboost.rst:25 +#: ../../source/tutorial-quickstart-xgboost.rst:27 msgid "" "Indeed, as the demand for data privacy and decentralized learning grows, " "there's an increasing requirement to implement federated XGBoost systems " @@ -26828,7 +27185,7 @@ msgid "" "detection." 
msgstr "事实上,随着对数据隐私和分散学习的需求不断增长,越来越多的专业应用(如生存分析和金融欺诈检测)需要实施联邦 XGBoost 系统。" -#: ../../source/tutorial-quickstart-xgboost.rst:27 +#: ../../source/tutorial-quickstart-xgboost.rst:31 msgid "" "Federated learning ensures that raw data remains on the local device, " "making it an attractive approach for sensitive domains where data " @@ -26839,10 +27196,11 @@ msgstr "" "联邦学习可确保原始数据保留在本地设备上,因此对于数据安全和隐私至关重要的敏感领域来说,这是一种极具吸引力的方法。鉴于 XGBoost " "的稳健性和高效性,将其与联邦学习相结合为应对这些特定挑战提供了一种前景广阔的解决方案。" -#: ../../source/tutorial-quickstart-xgboost.rst:30 +#: ../../source/tutorial-quickstart-xgboost.rst:36 +#, fuzzy msgid "" "In this tutorial we will learn how to train a federated XGBoost model on " -"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " +"HIGGS dataset using Flower and ``xgboost`` package. We use a simple " "example (`full code xgboost-quickstart " "`_)" " with two *clients* and one *server* to demonstrate how federated XGBoost" @@ -26857,11 +27215,11 @@ msgstr "" "comprehensive `_),以运行各种实验。" -#: ../../source/tutorial-quickstart-xgboost.rst:37 +#: ../../source/tutorial-quickstart-xgboost.rst:46 msgid "Environment Setup" msgstr "环境设定" -#: ../../source/tutorial-quickstart-xgboost.rst:39 +#: ../../source/tutorial-quickstart-xgboost.rst:48 #, fuzzy msgid "" "First of all, it is recommended to create a virtual environment and run " @@ -26871,19 +27229,20 @@ msgstr "" "首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" -#: ../../source/tutorial-quickstart-xgboost.rst:41 +#: ../../source/tutorial-quickstart-xgboost.rst:51 msgid "" "We first need to install Flower and Flower Datasets. 
You can do this by " "running :" msgstr "我们首先需要安装 Flower 和 Flower Datasets。您可以通过运行 :" -#: ../../source/tutorial-quickstart-xgboost.rst:47 +#: ../../source/tutorial-quickstart-xgboost.rst:57 +#, fuzzy msgid "" -"Since we want to use :code:`xgboost` package to build up XGBoost trees, " -"let's go ahead and install :code:`xgboost`:" +"Since we want to use ``xgboost`` package to build up XGBoost trees, let's" +" go ahead and install ``xgboost``:" msgstr "既然我们要使用 :code:`xgboost` 软件包来构建 XGBoost 树,那就继续安装 :code:`xgboost`:" -#: ../../source/tutorial-quickstart-xgboost.rst:57 +#: ../../source/tutorial-quickstart-xgboost.rst:67 msgid "" "*Clients* are responsible for generating individual weight-updates for " "the model based on their local datasets. Now that we have all our " @@ -26891,153 +27250,159 @@ msgid "" "clients and one server." msgstr "*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。" -#: ../../source/tutorial-quickstart-xgboost.rst:60 +#: ../../source/tutorial-quickstart-xgboost.rst:71 +#, fuzzy msgid "" -"In a file called :code:`client.py`, import xgboost, Flower, Flower " -"Datasets and other related functions:" +"In a file called ``client.py``, import xgboost, Flower, Flower Datasets " +"and other related functions:" msgstr "在名为 :code:`client.py` 的文件中,导入 xgboost、Flower、Flower Datasets 和其他相关函数:" -#: ../../source/tutorial-quickstart-xgboost.rst:87 +#: ../../source/tutorial-quickstart-xgboost.rst:99 msgid "Dataset partition and hyper-parameter selection" msgstr "数据集划分和超参数选择" -#: ../../source/tutorial-quickstart-xgboost.rst:89 +#: ../../source/tutorial-quickstart-xgboost.rst:101 msgid "" "Prior to local training, we require loading the HIGGS dataset from Flower" " Datasets and conduct data partitioning for FL:" msgstr "在本地训练之前,我们需要从 Flower Datasets 加载 HIGGS 数据集,并对 FL 进行数据分区:" -#: ../../source/tutorial-quickstart-xgboost.rst:102 +#: ../../source/tutorial-quickstart-xgboost.rst:115 #, fuzzy msgid "" "In this example, we split the dataset 
into 30 partitions with uniform " -"distribution (:code:`IidPartitioner(num_partitions=30)`). Then, we load " -"the partition for the given client based on :code:`partition_id`:" +"distribution (``IidPartitioner(num_partitions=30)``). Then, we load the " +"partition for the given client based on ``partition_id``:" msgstr "" "在此示例中,我们将数据集分割成两个均匀分布的分区(:code:`IidPartitioner(num_partitions=2)`)。然后,我们根据" " :code:`node_id` 为给定客户端加载分区:" -#: ../../source/tutorial-quickstart-xgboost.rst:121 +#: ../../source/tutorial-quickstart-xgboost.rst:135 +#, fuzzy msgid "" "After that, we do train/test splitting on the given partition (client's " -"local data), and transform data format for :code:`xgboost` package." +"local data), and transform data format for ``xgboost`` package." msgstr "然后,我们在给定的分区(客户端的本地数据)上进行训练/测试分割,并为 :code:`xgboost` 软件包转换数据格式。" -#: ../../source/tutorial-quickstart-xgboost.rst:134 +#: ../../source/tutorial-quickstart-xgboost.rst:149 +#, fuzzy msgid "" -"The functions of :code:`train_test_split` and " -":code:`transform_dataset_to_dmatrix` are defined as below:" +"The functions of ``train_test_split`` and " +"``transform_dataset_to_dmatrix`` are defined as below:" msgstr ":code:`train_test_split` 和 :code:`transform_dataset_too_dmatrix` 的函数定义如下:" -#: ../../source/tutorial-quickstart-xgboost.rst:158 +#: ../../source/tutorial-quickstart-xgboost.rst:174 msgid "Finally, we define the hyper-parameters used for XGBoost training." msgstr "最后,我们定义了用于 XGBoost 训练的超参数。" -#: ../../source/tutorial-quickstart-xgboost.rst:174 +#: ../../source/tutorial-quickstart-xgboost.rst:190 +#, fuzzy msgid "" -"The :code:`num_local_round` represents the number of iterations for local" -" tree boost. We use CPU for the training in default. One can shift it to " -"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " -"evaluation metric." +"The ``num_local_round`` represents the number of iterations for local " +"tree boost. We use CPU for the training in default. 
One can shift it to " +"GPU by setting ``tree_method`` to ``gpu_hist``. We use AUC as evaluation " +"metric." msgstr "" "代码:`num_local_round`表示本地树的迭代次数。我们默认使用 CPU 进行训练。可以通过将 :code:`tree_method` " "设置为 :code:`gpu_hist`,将其转换为 GPU。我们使用 AUC 作为评估指标。" -#: ../../source/tutorial-quickstart-xgboost.rst:181 +#: ../../source/tutorial-quickstart-xgboost.rst:195 msgid "Flower client definition for XGBoost" msgstr "用于 XGBoost 的 Flower 客户端定义" -#: ../../source/tutorial-quickstart-xgboost.rst:183 +#: ../../source/tutorial-quickstart-xgboost.rst:197 +#, fuzzy msgid "" "After loading the dataset we define the Flower client. We follow the " -"general rule to define :code:`XgbClient` class inherited from " -":code:`fl.client.Client`." +"general rule to define ``XgbClient`` class inherited from " +"``fl.client.Client``." msgstr "" "加载数据集后,我们定义 Flower 客户端。我们按照一般规则定义从 :code:`fl.client.Client` 继承而来的 " ":code:`XgbClient` 类。" -#: ../../source/tutorial-quickstart-xgboost.rst:205 +#: ../../source/tutorial-quickstart-xgboost.rst:219 msgid "" -"All required parameters defined above are passed to :code:`XgbClient`'s " +"All required parameters defined above are passed to ``XgbClient``'s " "constructor." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:207 +#: ../../source/tutorial-quickstart-xgboost.rst:221 +#, fuzzy msgid "" -"Then, we override :code:`get_parameters`, :code:`fit` and " -":code:`evaluate` methods insides :code:`XgbClient` class as follows." +"Then, we override ``get_parameters``, ``fit`` and ``evaluate`` methods " +"insides ``XgbClient`` class as follows." msgstr "" "然后,我们在 :code:`XgbClient` 类中重写 :code:`get_parameters`、:code:`fit` 和 " ":code:`evaluate` 方法如下。" -#: ../../source/tutorial-quickstart-xgboost.rst:221 +#: ../../source/tutorial-quickstart-xgboost.rst:236 +#, fuzzy msgid "" "Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. 
In this case, we do not use " -":code:`get_parameters` and :code:`set_parameters` to initialise model " -"parameters for XGBoost. As a result, let's return an empty tensor in " -":code:`get_parameters` when it is called by the server at the first " -"round." +"specified random weights. In this case, we do not use ``get_parameters`` " +"and ``set_parameters`` to initialise model parameters for XGBoost. As a " +"result, let's return an empty tensor in ``get_parameters`` when it is " +"called by the server at the first round." msgstr "" "与神经网络训练不同,XGBoost 树不是从指定的随机参数开始的。在这种情况下,我们不使用 :code:`get_parameters` 和 " ":code:`set_parameters` 来初始化 XGBoost 的模型参数。因此,当服务器在第一轮调用 " ":code:`get_parameters` 时,让我们在 :code:`get_parameters` 中返回一个空张量。" -#: ../../source/tutorial-quickstart-xgboost.rst:262 +#: ../../source/tutorial-quickstart-xgboost.rst:278 #, fuzzy msgid "" -"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " -"up the first set of trees. From the second round, we load the global " -"model sent from server to new build Booster object, and then update model" -" weights on local training data with function :code:`local_boost` as " -"follows:" +"In ``fit``, at the first round, we call ``xgb.train()`` to build up the " +"first set of trees. From the second round, we load the global model sent " +"from server to new build Booster object, and then update model weights on" +" local training data with function ``local_boost`` as follows:" msgstr "" "在 :code:`fit`中,第一轮我们调用 :code:`xgb.train()`来建立第一组树,返回的 Booster 对象和 config " "分别存储在 :code:`self.bst` 和 :code:`self.config` 中。从第二轮开始,我们将服务器发送的全局模型加载到 " ":code:`self.bst`,然后使用函数 :code:`local_boost`更新本地训练数据的模型权重,如下所示:" -#: ../../source/tutorial-quickstart-xgboost.rst:281 +#: ../../source/tutorial-quickstart-xgboost.rst:298 #, fuzzy msgid "" -"Given :code:`num_local_round`, we update trees by calling " -":code:`bst_input.update` method. 
After training, the last " -":code:`N=num_local_round` trees will be extracted to send to the server." +"Given ``num_local_round``, we update trees by calling " +"``bst_input.update`` method. After training, the last " +"``N=num_local_round`` trees will be extracted to send to the server." msgstr "" "给定 :code:`num_local_round`,我们通过调用 " ":code:`self.bst.update`方法更新树。训练结束后,我们将提取最后一个 :code:`N=num_local_round` " "树并发送给服务器。" -#: ../../source/tutorial-quickstart-xgboost.rst:313 +#: ../../source/tutorial-quickstart-xgboost.rst:330 #, fuzzy msgid "" -"In :code:`evaluate`, after loading the global model, we call " -":code:`bst.eval_set` function to conduct evaluation on valid set. The AUC" -" value will be returned." +"In ``evaluate``, after loading the global model, we call ``bst.eval_set``" +" function to conduct evaluation on valid set. The AUC value will be " +"returned." msgstr "在 :code:`evaluate`中,我们调用 :code:`self.bst.eval_set`函数对有效集合进行评估。将返回 AUC 值。" -#: ../../source/tutorial-quickstart-xgboost.rst:316 +#: ../../source/tutorial-quickstart-xgboost.rst:333 +#, fuzzy msgid "" -"Now, we can create an instance of our class :code:`XgbClient` and add one" -" line to actually run this client:" +"Now, we can create an instance of our class ``XgbClient`` and add one " +"line to actually run this client:" msgstr "现在,我们可以创建一个 :code:`XgbClient` 类的实例,并添加一行来实际运行该客户端:" -#: ../../source/tutorial-quickstart-xgboost.rst:332 +#: ../../source/tutorial-quickstart-xgboost.rst:350 #, fuzzy msgid "" -"That's it for the client. We only have to implement :code:`Client` and " -"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " -"tells the client which server to connect to. In our case we can run the " -"server and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." 
+"That's it for the client. We only have to implement ``Client`` and call " +"``fl.client.start_client()``. The string ``\"[::]:8080\"`` tells the " +"client which server to connect to. In our case we can run the server and " +"the client on the same machine, therefore we use ``\"[::]:8080\"``. If we" +" run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" point the client at." msgstr "" "这就是客户端。我们只需实现 :code:`客户端`并调用 :code:`fl.client.start_client()`。字符串 " ":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " ":code:`\"[::]:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的 " ":code:`server_address`。" -#: ../../source/tutorial-quickstart-xgboost.rst:343 +#: ../../source/tutorial-quickstart-xgboost.rst:360 msgid "" "These updates are then sent to the *server* which will aggregate them to " "produce a better model. Finally, the *server* sends this improved version" @@ -27046,108 +27411,112 @@ msgstr "" "然后,这些更新会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将这个改进版的模型发回给每个*客户端*,以完成一轮完整的" " FL。" -#: ../../source/tutorial-quickstart-xgboost.rst:346 +#: ../../source/tutorial-quickstart-xgboost.rst:364 +#, fuzzy msgid "" -"In a file named :code:`server.py`, import Flower and FedXgbBagging from " -":code:`flwr.server.strategy`." +"In a file named ``server.py``, import Flower and FedXgbBagging from " +"``flwr.server.strategy``." msgstr "" "在名为 :code:`server.py` 的文件中,从 :code:`flwr.server.strategy` 导入 Flower 和 " "FedXgbBagging。" -#: ../../source/tutorial-quickstart-xgboost.rst:348 +#: ../../source/tutorial-quickstart-xgboost.rst:367 msgid "We first define a strategy for XGBoost bagging aggregation." msgstr "我们首先定义了 XGBoost bagging聚合策略。" -#: ../../source/tutorial-quickstart-xgboost.rst:380 +#: ../../source/tutorial-quickstart-xgboost.rst:401 #, fuzzy msgid "" -"We use two clients for this example. 
An " -":code:`evaluate_metrics_aggregation` function is defined to collect and " -"wighted average the AUC values from clients. The :code:`config_func` " -"function is to return the current FL round number to client's " -":code:`fit()` and :code:`evaluate()` methods." +"We use two clients for this example. An ``evaluate_metrics_aggregation`` " +"function is defined to collect and wighted average the AUC values from " +"clients. The ``config_func`` function is to return the current FL round " +"number to client's ``fit()`` and ``evaluate()`` methods." msgstr "" "本示例使用两个客户端。我们定义了一个 :code:`evaluate_metrics_aggregation` 函数,用于收集客户机的 AUC " "值并求取平均值。" -#: ../../source/tutorial-quickstart-xgboost.rst:384 +#: ../../source/tutorial-quickstart-xgboost.rst:406 msgid "Then, we start the server:" msgstr "然后,我们启动服务器:" -#: ../../source/tutorial-quickstart-xgboost.rst:396 +#: ../../source/tutorial-quickstart-xgboost.rst:418 msgid "Tree-based bagging aggregation" msgstr "基于树的bagging聚合" -#: ../../source/tutorial-quickstart-xgboost.rst:398 +#: ../../source/tutorial-quickstart-xgboost.rst:420 msgid "" "You must be curious about how bagging aggregation works. Let's look into " "the details." msgstr "您一定很好奇bagging聚合是如何工作的。让我们来详细了解一下。" -#: ../../source/tutorial-quickstart-xgboost.rst:400 +#: ../../source/tutorial-quickstart-xgboost.rst:422 +#, fuzzy msgid "" -"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " -":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." -" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " -"and :code:`evaluate` methods as follows:" +"In file ``flwr.server.strategy.fedxgb_bagging.py``, we define " +"``FedXgbBagging`` inherited from ``flwr.server.strategy.FedAvg``. 
Then, " +"we override the ``aggregate_fit``, ``aggregate_evaluate`` and " +"``evaluate`` methods as follows:" msgstr "" "在文件 :code:`flwr.server.strategy.fedxgb_bagging.py`中,我们定义了从 " ":code:`flwr.server.strategy.FedAvg`继承的 :code:`FedXgbBagging`。然后,我们覆盖 " ":code:`aggregate_fit`、:code:`aggregate_evaluate` 和 :code:`evaluate` 方法如下:" -#: ../../source/tutorial-quickstart-xgboost.rst:496 +#: ../../source/tutorial-quickstart-xgboost.rst:519 +#, fuzzy msgid "" -"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " -"trees by calling :code:`aggregate()` function:" +"In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost " +"trees by calling ``aggregate()`` function:" msgstr "" "在 :code:`aggregate_fit` 中,我们通过调用 :code:`aggregate()` 函数,按顺序聚合客户端的 XGBoost" " 树:" -#: ../../source/tutorial-quickstart-xgboost.rst:555 +#: ../../source/tutorial-quickstart-xgboost.rst:579 +#, fuzzy msgid "" "In this function, we first fetch the number of trees and the number of " "parallel trees for the current and previous model by calling " -":code:`_get_tree_nums`. Then, the fetched information will be aggregated." -" After that, the trees (containing model weights) are aggregated to " +"``_get_tree_nums``. Then, the fetched information will be aggregated. " +"After that, the trees (containing model weights) are aggregated to " "generate a new tree model." msgstr "" "在该函数中,我们首先通过调用 :code:`_get_tree_nums` " "获取当前模型和上一个模型的树数和并行树数。然后,对获取的信息进行聚合。然后,聚合树(包含模型参数)生成新的树模型。" -#: ../../source/tutorial-quickstart-xgboost.rst:560 +#: ../../source/tutorial-quickstart-xgboost.rst:584 msgid "" "After traversal of all clients' models, a new global model is generated, " "followed by the serialisation, and sending back to each client." msgstr "在遍历所有客户端的模型后,会生成一个新的全局模型,然后进行序列化,并发回给每个客户端。" -#: ../../source/tutorial-quickstart-xgboost.rst:565 +#: ../../source/tutorial-quickstart-xgboost.rst:588 msgid "Launch Federated XGBoost!" msgstr "启动联邦 XGBoost!" 
-#: ../../source/tutorial-quickstart-xgboost.rst:641 +#: ../../source/tutorial-quickstart-xgboost.rst:664 +#, fuzzy msgid "" "Congratulations! You've successfully built and run your first federated " -"XGBoost system. The AUC values can be checked in " -":code:`metrics_distributed`. One can see that the average AUC increases " -"over FL rounds." +"XGBoost system. The AUC values can be checked in ``metrics_distributed``." +" One can see that the average AUC increases over FL rounds." msgstr "" "恭喜您!您已成功构建并运行了第一个联邦 XGBoost 系统。可以在 :code:`metrics_distributed` 中查看 AUC " "值。我们可以看到,平均 AUC 随 FL 轮数的增加而增加。" -#: ../../source/tutorial-quickstart-xgboost.rst:646 +#: ../../source/tutorial-quickstart-xgboost.rst:668 +#, fuzzy msgid "" "The full `source code `_ for this example can be found in :code:`examples" -"/xgboost-quickstart`." +"/xgboost-quickstart/>`_ for this example can be found in ``examples" +"/xgboost-quickstart``." msgstr "" "此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" -#: ../../source/tutorial-quickstart-xgboost.rst:650 +#: ../../source/tutorial-quickstart-xgboost.rst:673 msgid "Comprehensive Federated XGBoost" msgstr "综合的联邦 XGBoost" -#: ../../source/tutorial-quickstart-xgboost.rst:652 +#: ../../source/tutorial-quickstart-xgboost.rst:675 #, fuzzy msgid "" "Now that you have known how federated XGBoost work with Flower, it's time" @@ -27165,12 +27534,12 @@ msgstr "" "`_)中,我们提供了更多选项来定义各种实验设置,包括数据分区和集中/分布式评估。让我们一起来看看!" 
-#: ../../source/tutorial-quickstart-xgboost.rst:659 +#: ../../source/tutorial-quickstart-xgboost.rst:685 #, fuzzy msgid "Cyclic training" msgstr "集中式训练" -#: ../../source/tutorial-quickstart-xgboost.rst:661 +#: ../../source/tutorial-quickstart-xgboost.rst:687 #, fuzzy msgid "" "In addition to bagging aggregation, we offer a cyclic training scheme, " @@ -27184,20 +27553,18 @@ msgstr "" "FL。在循环训练方案中,每轮只有一个客户端参与训练,而不是多个客户端聚合在一起。训练好的本地 XGBoost " "树将传递给下一个客户端,作为下一轮提升的初始化模型。" -#: ../../source/tutorial-quickstart-xgboost.rst:665 +#: ../../source/tutorial-quickstart-xgboost.rst:693 #, fuzzy -msgid "" -"To do this, we first customise a :code:`ClientManager` in " -":code:`server_utils.py`:" +msgid "To do this, we first customise a ``ClientManager`` in ``server_utils.py``:" msgstr "为此,我们首先要在 :code:`server_utils.py` 中自定义一个 :code:`ClientManager`:" -#: ../../source/tutorial-quickstart-xgboost.rst:705 +#: ../../source/tutorial-quickstart-xgboost.rst:733 #, fuzzy msgid "" -"The customised :code:`ClientManager` samples all available clients in " -"each FL round based on the order of connection to the server. Then, we " -"define a new strategy :code:`FedXgbCyclic` in " -":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " +"The customised ``ClientManager`` samples all available clients in each FL" +" round based on the order of connection to the server. Then, we define a " +"new strategy ``FedXgbCyclic`` in " +"``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially " "select only one client in given round and pass the received model to next" " client." msgstr "" @@ -27205,174 +27572,176 @@ msgstr "" ":code:`flwr.server.strategy.fedxgb_cyclic.py`\"中定义了一个新策略 " ":code:`FedXgbCyclic`,以便在给定回合中按顺序只选择一个客户端,并将接收到的模型传递给下一个客户端。" -#: ../../source/tutorial-quickstart-xgboost.rst:746 +#: ../../source/tutorial-quickstart-xgboost.rst:775 #, fuzzy msgid "" -"Unlike the original :code:`FedAvg`, we don't perform aggregation here. 
" +"Unlike the original ``FedAvg``, we don't perform aggregation here. " "Instead, we just make a copy of the received client model as global model" -" by overriding :code:`aggregate_fit`." +" by overriding ``aggregate_fit``." msgstr "" "与最初的 :code:`FedAvg` 不同,我们在这里不执行聚合。相反,我们只是通过覆盖 :code:`aggregate_fit` " "将接收到的客户端模型复制为全局模型。" -#: ../../source/tutorial-quickstart-xgboost.rst:749 +#: ../../source/tutorial-quickstart-xgboost.rst:778 #, fuzzy msgid "" -"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" -" methods ensure the clients to be sequentially selected given FL round:" +"Also, the customised ``configure_fit`` and ``configure_evaluate`` methods" +" ensure the clients to be sequentially selected given FL round:" msgstr "" "此外,定制的 :code:`configure_fit` 和 :code:`configure_evaluate` 方法可确保在 FL " "轮中按顺序选择客户:" -#: ../../source/tutorial-quickstart-xgboost.rst:813 +#: ../../source/tutorial-quickstart-xgboost.rst:840 msgid "Customised data partitioning" msgstr "定制数据分区" -#: ../../source/tutorial-quickstart-xgboost.rst:815 +#: ../../source/tutorial-quickstart-xgboost.rst:842 +#, fuzzy msgid "" -"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" -" to instantiate the data partitioner based on the given " -":code:`num_partitions` and :code:`partitioner_type`. Currently, we " -"provide four supported partitioner type to simulate the uniformity/non-" -"uniformity in data quantity (uniform, linear, square, exponential)." +"In ``dataset.py``, we have a function ``instantiate_partitioner`` to " +"instantiate the data partitioner based on the given ``num_partitions`` " +"and ``partitioner_type``. Currently, we provide four supported " +"partitioner type to simulate the uniformity/non-uniformity in data " +"quantity (uniform, linear, square, exponential)." 
msgstr "" "在 :code:`dataset.py` 中,我们有一个函数 :code:`instantiate_partitioner` 来根据给定的 " ":code:`num_partitions` 和 :code:`partitioner_type` " "来实例化数据分区器。目前,我们提供四种支持的分区器类型(均匀、线性、正方形、指数)来模拟数据量的均匀性/非均匀性。" -#: ../../source/tutorial-quickstart-xgboost.rst:846 +#: ../../source/tutorial-quickstart-xgboost.rst:873 msgid "Customised centralised/distributed evaluation" msgstr "定制的集中/分布式评估" -#: ../../source/tutorial-quickstart-xgboost.rst:848 +#: ../../source/tutorial-quickstart-xgboost.rst:875 #, fuzzy msgid "" "To facilitate centralised evaluation, we define a function in " -":code:`server_utils.py`:" +"``server_utils.py``:" msgstr "为便于集中评估,我们在 :code:`server.py` 中定义了一个函数:" -#: ../../source/tutorial-quickstart-xgboost.rst:880 +#: ../../source/tutorial-quickstart-xgboost.rst:907 +#, fuzzy msgid "" "This function returns a evaluation function which instantiates a " -":code:`Booster` object and loads the global model weights to it. The " -"evaluation is conducted by calling :code:`eval_set()` method, and the " -"tested AUC value is reported." +"``Booster`` object and loads the global model weights to it. The " +"evaluation is conducted by calling ``eval_set()`` method, and the tested " +"AUC value is reported." msgstr "" "此函数返回一个评估函数,该函数实例化一个 :code:`Booster` 对象,并向其加载全局模型参数。评估通过调用 " ":code:`eval_set()` 方法进行,并报告测试的 AUC 值。" -#: ../../source/tutorial-quickstart-xgboost.rst:883 +#: ../../source/tutorial-quickstart-xgboost.rst:911 #, fuzzy msgid "" "As for distributed evaluation on the clients, it's same as the quick-" -"start example by overriding the :code:`evaluate()` method insides the " -":code:`XgbClient` class in :code:`client_utils.py`." +"start example by overriding the ``evaluate()`` method insides the " +"``XgbClient`` class in ``client_utils.py``." 
msgstr "" "至于客户端上的分布式评估,与快速启动示例相同,通过覆盖 :code:`client.py` 中 :code:`XgbClient` 类内部的 " ":code:`evaluate()` 方法。" -#: ../../source/tutorial-quickstart-xgboost.rst:887 +#: ../../source/tutorial-quickstart-xgboost.rst:916 #, fuzzy msgid "Flower simulation" msgstr "运行模拟" -#: ../../source/tutorial-quickstart-xgboost.rst:888 +#: ../../source/tutorial-quickstart-xgboost.rst:918 #, fuzzy msgid "" -"We also provide an example code (:code:`sim.py`) to use the simulation " +"We also provide an example code (``sim.py``) to use the simulation " "capabilities of Flower to simulate federated XGBoost training on either a" " single machine or a cluster of machines." msgstr "我们还提供了一个示例代码(:code:`sim.py`),用于使用 Flower 的模拟功能在单台机器或机器集群上模拟联合 XGBoost 训练。" -#: ../../source/tutorial-quickstart-xgboost.rst:922 +#: ../../source/tutorial-quickstart-xgboost.rst:954 #, fuzzy msgid "" -"After importing all required packages, we define a :code:`main()` " -"function to perform the simulation process:" +"After importing all required packages, we define a ``main()`` function to" +" perform the simulation process:" msgstr "导入所有需要的软件包后,我们定义了一个 :code:`main()` 函数来执行模拟程序:" -#: ../../source/tutorial-quickstart-xgboost.rst:977 +#: ../../source/tutorial-quickstart-xgboost.rst:1010 #, fuzzy msgid "" "We first load the dataset and perform data partitioning, and the pre-" -"processed data is stored in a :code:`list`. After the simulation begins, " -"the clients won't need to pre-process their partitions again." +"processed data is stored in a ``list``. After the simulation begins, the " +"clients won't need to pre-process their partitions again." 
msgstr "我们首先加载数据集并执行数据分区,预处理后的数据存储在 :code:`list` 中。模拟开始后,客户端就不需要再预处理分区了。" -#: ../../source/tutorial-quickstart-xgboost.rst:980 +#: ../../source/tutorial-quickstart-xgboost.rst:1014 #, fuzzy msgid "Then, we define the strategies and other hyper-parameters:" msgstr "然后,我们定义策略和其他超参数:" -#: ../../source/tutorial-quickstart-xgboost.rst:1031 +#: ../../source/tutorial-quickstart-xgboost.rst:1065 #, fuzzy msgid "" "After that, we start the simulation by calling " -":code:`fl.simulation.start_simulation`:" +"``fl.simulation.start_simulation``:" msgstr "然后,我们调用 :code:`fl.simulation.start_simulation` 开始模拟:" -#: ../../source/tutorial-quickstart-xgboost.rst:1051 +#: ../../source/tutorial-quickstart-xgboost.rst:1085 #, fuzzy msgid "" -"One of key parameters for :code:`start_simulation` is :code:`client_fn` " -"which returns a function to construct a client. We define it as follows:" +"One of key parameters for ``start_simulation`` is ``client_fn`` which " +"returns a function to construct a client. We define it as follows:" msgstr "" ":code:`start_simulation` 的一个关键参数是 " ":code:`client_fn`,它返回一个用于构建客户端的函数。我们将其定义如下:" -#: ../../source/tutorial-quickstart-xgboost.rst:1094 +#: ../../source/tutorial-quickstart-xgboost.rst:1126 msgid "Arguments parser" msgstr "参数解析器" -#: ../../source/tutorial-quickstart-xgboost.rst:1096 +#: ../../source/tutorial-quickstart-xgboost.rst:1128 #, fuzzy msgid "" -"In :code:`utils.py`, we define the arguments parsers for clients, server " -"and simulation, allowing users to specify different experimental " -"settings. Let's first see the sever side:" +"In ``utils.py``, we define the arguments parsers for clients, server and " +"simulation, allowing users to specify different experimental settings. 
" +"Let's first see the sever side:" msgstr "在 :code:`utils.py` 中,我们定义了客户端和服务器端的参数解析器,允许用户指定不同的实验设置。让我们先看看服务器端:" -#: ../../source/tutorial-quickstart-xgboost.rst:1142 +#: ../../source/tutorial-quickstart-xgboost.rst:1175 #, fuzzy msgid "" "This allows user to specify training strategies / the number of total " "clients / FL rounds / participating clients / clients for evaluation, and" -" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" -" will do centralised evaluation and all functionalities for client " +" evaluation fashion. Note that with ``--centralised-eval``, the sever " +"will do centralised evaluation and all functionalities for client " "evaluation will be disabled." msgstr "" "这允许用户指定总客户数/FL 轮数/参与客户数/评估客户数以及评估方式。请注意,如果使用 :code:`--centralised-" "eval`,服务器将进行集中评估,客户端评估的所有功能将被禁用。" -#: ../../source/tutorial-quickstart-xgboost.rst:1146 +#: ../../source/tutorial-quickstart-xgboost.rst:1180 msgid "Then, the argument parser on client side:" msgstr "然后是客户端的参数解析器:" -#: ../../source/tutorial-quickstart-xgboost.rst:1200 +#: ../../source/tutorial-quickstart-xgboost.rst:1234 #, fuzzy msgid "" "This defines various options for client data partitioning. Besides, " "clients also have an option to conduct evaluation on centralised test set" -" by setting :code:`--centralised-eval`, as well as an option to perform " -"scaled learning rate based on the number of clients by setting :code" -":`--scaled-lr`." +" by setting ``--centralised-eval``, as well as an option to perform " +"scaled learning rate based on the number of clients by setting " +"``--scaled-lr``." 
msgstr "这定义了客户端数据分区的各种选项。此外,通过设置 :code:`-centralised-eval`,客户端还可以选择在集中测试集上进行评估。" -#: ../../source/tutorial-quickstart-xgboost.rst:1204 +#: ../../source/tutorial-quickstart-xgboost.rst:1239 #, fuzzy msgid "We also have an argument parser for simulation:" msgstr "我们还有一个用于模拟的参数解析器:" -#: ../../source/tutorial-quickstart-xgboost.rst:1282 +#: ../../source/tutorial-quickstart-xgboost.rst:1317 #, fuzzy msgid "This integrates all arguments for both client and server sides." msgstr "这整合了客户端和服务器端的所有参数。" -#: ../../source/tutorial-quickstart-xgboost.rst:1285 +#: ../../source/tutorial-quickstart-xgboost.rst:1320 msgid "Example commands" msgstr "命令示例" -#: ../../source/tutorial-quickstart-xgboost.rst:1287 +#: ../../source/tutorial-quickstart-xgboost.rst:1322 #, fuzzy msgid "" "To run a centralised evaluated experiment with bagging strategy on 5 " @@ -27380,21 +27749,21 @@ msgid "" "server as below:" msgstr "为了在 5 个客户端上进行 50 轮指数分布的集中评估实验,我们首先启动服务器,如下所示:" -#: ../../source/tutorial-quickstart-xgboost.rst:1294 +#: ../../source/tutorial-quickstart-xgboost.rst:1329 msgid "Then, on each client terminal, we start the clients:" msgstr "然后,我们在每个客户终端上启动客户机:" -#: ../../source/tutorial-quickstart-xgboost.rst:1300 +#: ../../source/tutorial-quickstart-xgboost.rst:1335 #, fuzzy msgid "To run the same experiment with Flower simulation:" msgstr "运行与 Flower 模拟相同的实验:" -#: ../../source/tutorial-quickstart-xgboost.rst:1306 +#: ../../source/tutorial-quickstart-xgboost.rst:1341 #, fuzzy msgid "" "The full `code `_ for this comprehensive example can be found in" -" :code:`examples/xgboost-comprehensive`." +" ``examples/xgboost-comprehensive``." 
msgstr "" "此综合示例的全部`源代码 `_ 可在 :code:`examples/xgboost-comprehensive` 中找到。" @@ -28994,7 +29363,7 @@ msgid "" msgstr "在机器学习中,我们有一个模型和数据。模型可以是一个神经网络(如图所示),也可以是其他东西,比如经典的线性回归。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" +msgid "|3a7aceef05f0421794726ac54aaf12fd|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 @@ -29009,7 +29378,7 @@ msgid "" msgstr "我们使用数据来训练模型,以完成一项有用的任务。任务可以是检测图像中的物体、转录音频或玩围棋等游戏。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|33cacb7d985c4906b348515c1a5cd993|" +msgid "|d741075f8e624331b42c0746f7d258a0|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 @@ -29030,7 +29399,7 @@ msgid "" msgstr "它源于智能手机上用户与应用程序的交互、汽车上传感器数据的收集、笔记本电脑上键盘输入的接收,或者智能扬声器上某人试着唱的歌。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|cc080a555947492fa66131dc3a967603|" +msgid "|8fc92d668bcb42b8bda55143847f2329|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 @@ -29048,7 +29417,7 @@ msgstr "" "\"通常不只是一个地方,而是很多地方。它可能是多个运行同一应用程序的设备。但也可能是多个组织,都在为同一任务生成数据。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|085c3e0fb8664c6aa06246636524b20b|" +msgid "|1c705d833a024f22adcaeb8ae3d13b0b|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 @@ -29064,7 +29433,7 @@ msgid "" msgstr "因此,要使用机器学习或任何类型的数据分析,过去使用的方法是在中央服务器上收集所有数据。这个服务器可以在数据中心的某个地方,也可以在云端的某个地方。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|bfe69c74e48c45d49b50251c38c2a019|" +msgid "|77a037b546a84262b608e04bc82a2c96|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 @@ -29079,7 +29448,7 @@ msgid "" msgstr "一旦所有数据都收集到一处,我们最终就可以使用机器学习算法在数据上训练我们的模型。这就是我们基本上一直依赖的机器学习方法。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|ebbecd651f0348d99c6511ea859bf4ca|" +msgid 
"|f568e24c9fb0435690ac628210a4be96|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 @@ -29099,7 +29468,7 @@ msgid "" msgstr "我们刚刚看到的经典机器学习方法可以在某些情况下使用。很好的例子包括对假日照片进行分类或分析网络流量。在这些案例中,所有数据自然都可以在中央服务器上获得。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|163117eb654a4273babba413cf8065f5|" +msgid "|a7bf029981514e2593aa3a2b48c9d76a|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 @@ -29114,7 +29483,7 @@ msgid "" msgstr "但这种方法并不适用于许多其他情况。例如,集中服务器上没有数据,或者一台服务器上的数据不足以训练出一个好的模型。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|452ac3ba453b4cd1be27be1ba7560d64|" +msgid "|3f645ad807f84be8b1f8f3267173939c|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 @@ -29262,7 +29631,7 @@ msgid "" msgstr "我们首先在服务器上初始化模型。这与经典的集中式学习完全相同:我们随机或从先前保存的检查点初始化模型参数。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|f403fcd69e4e44409627e748b404c086|" +msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 @@ -29286,7 +29655,7 @@ msgid "" msgstr "接下来,我们会将全局模型的参数发送到连接的客户端节点(如智能手机等边缘设备或企业的服务器)。这是为了确保每个参与节点都使用相同的模型参数开始本地训练。我们通常只使用几个连接节点,而不是所有节点。这样做的原因是,选择越来越多的客户端节点会导致收益递减。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|4b00fe63870145968f8443619a792a42|" +msgid "|edcf9a04d96e42608fd01a333375febe|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 @@ -29312,7 +29681,7 @@ msgstr "" "(mini-batches)。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|368378731066486fa4397e89bc6b870c|" +msgid "|3dae22fe797043968e2b7aa7073c78bd|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 @@ -29335,7 +29704,7 @@ msgid "" msgstr "经过本地训练后,每个客户节点最初收到的模型参数都会略有不同。参数之所以不同,是因为每个客户端节点的本地数据集中都有不同的数据。然后,客户端节点将这些模型更新发回服务器。它们发送的模型更新既可以是完整的模型参数,也可以只是本地训练过程中积累的梯度。" #: 
../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|a66aa83d85bf4ffba7ed660b718066da|" +msgid "|ba178f75267d4ad8aa7363f20709195f|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 @@ -29381,7 +29750,7 @@ msgstr "" " 100 个示例的 10 倍。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|82324b9af72a4582a81839d55caab767|" +msgid "|c380c750bfd2444abce039a1c6fa8e60|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 @@ -29488,7 +29857,7 @@ msgstr "" "为联邦学习、分析和评估提供了一种统一的方法。它允许用户联邦化任何工作负载、任何 ML 框架和任何编程语言。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" +msgid "|e7cec00a114b48359935c6510595132e|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 @@ -33370,3 +33739,166 @@ msgstr "" #~ msgid "|ed9aae51da70428eab7eef32f21e819e|" #~ msgstr "" +#~ msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" +#~ msgstr "" + +#~ msgid "|33cacb7d985c4906b348515c1a5cd993|" +#~ msgstr "" + +#~ msgid "|cc080a555947492fa66131dc3a967603|" +#~ msgstr "" + +#~ msgid "|085c3e0fb8664c6aa06246636524b20b|" +#~ msgstr "" + +#~ msgid "|bfe69c74e48c45d49b50251c38c2a019|" +#~ msgstr "" + +#~ msgid "|ebbecd651f0348d99c6511ea859bf4ca|" +#~ msgstr "" + +#~ msgid "|163117eb654a4273babba413cf8065f5|" +#~ msgstr "" + +#~ msgid "|452ac3ba453b4cd1be27be1ba7560d64|" +#~ msgstr "" + +#~ msgid "|f403fcd69e4e44409627e748b404c086|" +#~ msgstr "" + +#~ msgid "|4b00fe63870145968f8443619a792a42|" +#~ msgstr "" + +#~ msgid "|368378731066486fa4397e89bc6b870c|" +#~ msgstr "" + +#~ msgid "|a66aa83d85bf4ffba7ed660b718066da|" +#~ msgstr "" + +#~ msgid "|82324b9af72a4582a81839d55caab767|" +#~ msgstr "" + +#~ msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" +#~ msgstr "" + +#~ msgid "" +#~ "Some quickstart examples may have " +#~ "limitations or requirements that prevent " +#~ "them from running on every environment." 
+#~ " For more information, please see " +#~ "`Limitations`_." +#~ msgstr "" + +#~ msgid "" +#~ "Change the application code. For " +#~ "example, change the ``seed`` in " +#~ "``quickstart_docker/task.py`` to ``43`` and " +#~ "save it:" +#~ msgstr "" + +#~ msgid ":code:`fit`" +#~ msgstr ":code:`fit`" + +#~ msgid "" +#~ "Note that since version :code:`1.11.0`, " +#~ ":code:`flower-server-app` no longer " +#~ "supports passing a reference to a " +#~ "`ServerApp` attribute. Instead, you need " +#~ "to pass the path to Flower app " +#~ "via the argument :code:`--app`. This is" +#~ " the path to a directory containing" +#~ " a `pyproject.toml`. You can create a" +#~ " valid Flower app by executing " +#~ ":code:`flwr new` and following the " +#~ "prompt." +#~ msgstr "" + +#~ msgid "" +#~ "All required parameters defined above " +#~ "are passed to :code:`XgbClient`'s constructor." +#~ msgstr "" + +#~ msgid "|b8714c45b74b4d8fb008e2ebb3bc1d44|" +#~ msgstr "" + +#~ msgid "|75f1561efcfd422ea67d28d1513120dc|" +#~ msgstr "" + +#~ msgid "|6a1f51b235304558a9bdaaabfc93b8d2|" +#~ msgstr "" + +#~ msgid "|35e70dab1fb544af9aa3a9c09c4f9797|" +#~ msgstr "" + +#~ msgid "|d7efb5705dd3467f991ed23746824a07|" +#~ msgstr "" + +#~ msgid "|94e7b021c7b540bfbedf7f082a41ff87|" +#~ msgstr "" + +#~ msgid "|a80714782dde439ab73936518f91fc3c|" +#~ msgstr "" + +#~ msgid "|c62080ca6197473da57d191c8225a9d9|" +#~ msgstr "" + +#~ msgid "|21a8f1e6a5b14a7bbb8559979d0e8a2b|" +#~ msgstr "" + +#~ msgid "|c310f2a22f7b4917bf42775aae7a1c09|" +#~ msgstr "" + +#~ msgid "|a0c5b43401194535a8460bcf02e65f9a|" +#~ msgstr "" + +#~ msgid "|aabfdbd5564e41a790f8ea93cc21a444|" +#~ msgstr "" + +#~ msgid "|c9cc8f160fa647b09e742fe4dc8edb54|" +#~ msgstr "" + +#~ msgid "|7e83aad011cd4907b2f02f907c6922e9|" +#~ msgstr "" + +#~ msgid "|4627c2bb6cc443ae9e079f81f33c9dd9|" +#~ msgstr "" + +#~ msgid "|131af8322dc5466b827afd24be98f8c0|" +#~ msgstr "" + +#~ msgid "|f92920b87f3a40179bf7ddd0b6144c53|" +#~ msgstr "" + +#~ msgid 
"|d62da263071d45a496f543e41fce3a19|" +#~ msgstr "" + +#~ msgid "|ad851971645b4e1fbf8d15bcc0b2ee11|" +#~ msgstr "" + +#~ msgid "|929e9a6de6b34edb8488e644e2bb5221|" +#~ msgstr "" + +#~ msgid "|404cf9c9e8d64784a55646c0f9479cbc|" +#~ msgstr "" + +#~ msgid "|b021ff9d25814458b1e631f8985a648b|" +#~ msgstr "" + +#~ msgid "|e6ca84e1df244f238288a768352678e5|" +#~ msgstr "" + +#~ msgid "|39c2422082554a21963baffb33a0d057|" +#~ msgstr "" + +#~ msgid "|07ecf5fcd6814e88906accec6fa0fbfb|" +#~ msgstr "" + +#~ msgid "|57e78c0ca8a94ba5a64a04b1f2280e55|" +#~ msgstr "" + +#~ msgid "|9819b40e59ee40a4921e1244e8c99bac|" +#~ msgstr "" + +#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|" +#~ msgstr "" + diff --git a/doc/source/conf.py b/doc/source/conf.py index 033b345b60cc..d78aeda0d48e 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -95,7 +95,7 @@ rst_prolog = """ .. |stable_flwr_version| replace:: 1.11.1 .. |stable_flwr_superlink_docker_digest| replace:: 4b317d5b6030710b476f4dbfab2c3a33021ad40a0fcfa54d7edd45e0c51d889c -.. |ubuntu_version| replace:: 22.04 +.. |ubuntu_version| replace:: 24.04 .. |setuptools_version| replace:: 70.3.0 .. |pip_version| replace:: 24.1.2 """ diff --git a/doc/source/contributor-explanation-public-and-private-apis.rst b/doc/source/contributor-explanation-public-and-private-apis.rst index 1dfdf88f97d3..ac62ae341f14 100644 --- a/doc/source/contributor-explanation-public-and-private-apis.rst +++ b/doc/source/contributor-explanation-public-and-private-apis.rst @@ -1,22 +1,23 @@ Public and private APIs ======================= -In Python, everything is public. -To enable developers to understand which components can be relied upon, Flower declares a public API. -Components that are part of the public API can be relied upon. -Changes to the public API are announced in the release notes and are subject to deprecation policies. +In Python, everything is public. To enable developers to understand which components can +be relied upon, Flower declares a public API. 
Components that are part of the public API +can be relied upon. Changes to the public API are announced in the release notes and are +subject to deprecation policies. -Everything that is not part of the public API is part of the private API. -Even though Python allows accessing them, user code should never use those components. -Private APIs can change at any time, even in patch releases. +Everything that is not part of the public API is part of the private API. Even though +Python allows accessing them, user code should never use those components. Private APIs +can change at any time, even in patch releases. How can you determine whether a component is part of the public API or not? Easy: - `Use the Flower API reference documentation `_ - `Use the Flower CLI reference documentation `_ -Everything listed in the reference documentation is part of the public API. -This document explains how Flower maintainers define the public API and how you can determine whether a component is part of the public API or not by reading the Flower source code. +Everything listed in the reference documentation is part of the public API. This +document explains how Flower maintainers define the public API and how you can determine +whether a component is part of the public API or not by reading the Flower source code. Flower public API ----------------- @@ -25,94 +26,117 @@ Flower has a well-defined public API. Let's look at this in more detail. .. important:: - Every component that is reachable by recursively following ``__init__.__all__`` starting from the root package (``flwr``) is part of the public API. + Every component that is reachable by recursively following ``__init__.__all__`` + starting from the root package (``flwr``) is part of the public API. -If you want to determine whether a component (class/function/generator/...) is part of the public API or not, you need to start at the root of the ``flwr`` package. 
-Let's use ``tree -L 1 -d src/py/flwr`` to look at the Python sub-packages contained ``flwr``: +If you want to determine whether a component (class/function/generator/...) is part of +the public API or not, you need to start at the root of the ``flwr`` package. Let's use +``tree -L 1 -d src/py/flwr`` to look at the Python sub-packages contained ``flwr``: .. code-block:: bash - flwr - ├── cli - ├── client - ├── common - ├── proto - ├── server - └── simulation + flwr + ├── cli + ├── client + ├── common + ├── proto + ├── server + └── simulation -Contrast this with the definition of ``__all__`` in the root ``src/py/flwr/__init__.py``: +Contrast this with the definition of ``__all__`` in the root +``src/py/flwr/__init__.py``: .. code-block:: python - # From `flwr/__init__.py` - __all__ = [ - "client", - "common", - "server", - "simulation", - ] - -You can see that ``flwr`` has six subpackages (``cli``, ``client``, ``common``, ``proto``, ``server``, ``simulation``), but only four of them are "exported" via ``__all__`` (``client``, ``common``, ``server``, ``simulation``). - -What does this mean? It means that ``client``, ``common``, ``server`` and ``simulation`` are part of the public API, but ``cli`` and ``proto`` are not. -The ``flwr`` subpackages ``cli`` and ``proto`` are private APIs. -A private API can change completely from one release to the next (even in patch releases). -It can change in a breaking way, it can be renamed (for example, ``flwr.cli`` could be renamed to ``flwr.command``) and it can even be removed completely. + # From `flwr/__init__.py` + __all__ = [ + "client", + "common", + "server", + "simulation", + ] + +You can see that ``flwr`` has six subpackages (``cli``, ``client``, ``common``, +``proto``, ``server``, ``simulation``), but only four of them are "exported" via +``__all__`` (``client``, ``common``, ``server``, ``simulation``). + +What does this mean? 
It means that ``client``, ``common``, ``server`` and ``simulation`` +are part of the public API, but ``cli`` and ``proto`` are not. The ``flwr`` subpackages +``cli`` and ``proto`` are private APIs. A private API can change completely from one +release to the next (even in patch releases). It can change in a breaking way, it can be +renamed (for example, ``flwr.cli`` could be renamed to ``flwr.command``) and it can even +be removed completely. Therefore, as a Flower user: - ``from flwr import client`` ✅ Ok, you're importing a public API. - ``from flwr import proto`` ❌ Not recommended, you're importing a private API. -What about components that are nested deeper in the hierarchy? Let's look at Flower strategies to see another typical pattern. -Flower strategies like ``FedAvg`` are often imported using ``from flwr.server.strategy import FedAvg``. -Let's look at ``src/py/flwr/server/strategy/__init__.py``: +What about components that are nested deeper in the hierarchy? Let's look at Flower +strategies to see another typical pattern. Flower strategies like ``FedAvg`` are often +imported using ``from flwr.server.strategy import FedAvg``. Let's look at +``src/py/flwr/server/strategy/__init__.py``: .. code-block:: python - from .fedavg import FedAvg as FedAvg - # ... more imports + from .fedavg import FedAvg as FedAvg + + # ... more imports - __all__ = [ - "FedAvg", - # ... more exports - ] + __all__ = [ + "FedAvg", + # ... more exports + ] -What's notable here is that all strategies are implemented in dedicated modules (e.g., ``fedavg.py``). -In ``__init__.py``, we *import* the components we want to make part of the public API and then *export* them via ``__all__``. -Note that we export the component itself (for example, the ``FedAvg`` class), but not the module it is defined in (for example, ``fedavg.py``). 
-This allows us to move the definition of ``FedAvg`` into a different module (or even a module in a subpackage) without breaking the public API (as long as we update the import path in ``__init__.py``). +What's notable here is that all strategies are implemented in dedicated modules (e.g., +``fedavg.py``). In ``__init__.py``, we *import* the components we want to make part of +the public API and then *export* them via ``__all__``. Note that we export the component +itself (for example, the ``FedAvg`` class), but not the module it is defined in (for +example, ``fedavg.py``). This allows us to move the definition of ``FedAvg`` into a +different module (or even a module in a subpackage) without breaking the public API (as +long as we update the import path in ``__init__.py``). Therefore: -- ``from flwr.server.strategy import FedAvg`` ✅ Ok, you're importing a class that is part of the public API. -- ``from flwr.server.strategy import fedavg`` ❌ Not recommended, you're importing a private module. +- ``from flwr.server.strategy import FedAvg`` ✅ Ok, you're importing a class that is + part of the public API. +- ``from flwr.server.strategy import fedavg`` ❌ Not recommended, you're importing a + private module. -This approach is also implemented in the tooling that automatically builds API reference docs. +This approach is also implemented in the tooling that automatically builds API reference +docs. Flower public API of private packages ------------------------------------- -We also use this to define the public API of private subpackages. -Public, in this context, means the API that other ``flwr`` subpackages should use. -For example, ``flwr.server.driver`` is a private subpackage (it's not exported via ``src/py/flwr/server/__init__.py``'s ``__all__``). +We also use this to define the public API of private subpackages. Public, in this +context, means the API that other ``flwr`` subpackages should use. 
For example, +``flwr.server.driver`` is a private subpackage (it's not exported via +``src/py/flwr/server/__init__.py``'s ``__all__``). -Still, the private sub-package ``flwr.server.driver`` defines a "public" API using ``__all__`` in ``src/py/flwr/server/driver/__init__.py``: +Still, the private sub-package ``flwr.server.driver`` defines a "public" API using +``__all__`` in ``src/py/flwr/server/driver/__init__.py``: .. code-block:: python - from .driver import Driver - from .grpc_driver import GrpcDriver - from .inmemory_driver import InMemoryDriver - - __all__ = [ - "Driver", - "GrpcDriver", - "InMemoryDriver", - ] - -The interesting part is that both ``GrpcDriver`` and ``InMemoryDriver`` are never used by Flower framework users, only by other parts of the Flower framework codebase. -Those other parts of the codebase import, for example, ``InMemoryDriver`` using ``from flwr.server.driver import InMemoryDriver`` (i.e., the ``InMemoryDriver`` exported via ``__all__``), not ``from flwr.server.driver.in_memory_driver import InMemoryDriver`` (``in_memory_driver.py`` is the module containing the actual ``InMemoryDriver`` class definition). - -This is because ``flwr.server.driver`` defines a public interface for other ``flwr`` subpackages. -This allows codeowners of ``flwr.server.driver`` to refactor the package without breaking other ``flwr``-internal users. + from .driver import Driver + from .grpc_driver import GrpcDriver + from .inmemory_driver import InMemoryDriver + + __all__ = [ + "Driver", + "GrpcDriver", + "InMemoryDriver", + ] + +The interesting part is that both ``GrpcDriver`` and ``InMemoryDriver`` are never used +by Flower framework users, only by other parts of the Flower framework codebase. 
Those +other parts of the codebase import, for example, ``InMemoryDriver`` using ``from +flwr.server.driver import InMemoryDriver`` (i.e., the ``InMemoryDriver`` exported via +``__all__``), not ``from flwr.server.driver.in_memory_driver import InMemoryDriver`` +(``in_memory_driver.py`` is the module containing the actual ``InMemoryDriver`` class +definition). + +This is because ``flwr.server.driver`` defines a public interface for other ``flwr`` +subpackages. This allows codeowners of ``flwr.server.driver`` to refactor the package +without breaking other ``flwr``-internal users. diff --git a/doc/source/contributor-how-to-build-docker-images.rst b/doc/source/contributor-how-to-build-docker-images.rst index d6acad4afa03..0b3ce243ce50 100644 --- a/doc/source/contributor-how-to-build-docker-images.rst +++ b/doc/source/contributor-how-to-build-docker-images.rst @@ -2,153 +2,161 @@ How to Build Docker Flower Images Locally ========================================= Flower provides pre-made docker images on `Docker Hub `_ -that include all necessary dependencies for running the SuperLink, SuperNode or ServerApp. -You can also build your own custom docker images from scratch with a different version of Python -or Linux distribution (Ubuntu/Alpine) if that is what you need. In this guide, we will explain what -images exist and how to build them locally. +that include all necessary dependencies for running the SuperLink, SuperNode or +ServerApp. You can also build your own custom docker images from scratch with a +different version of Python or Linux distribution (Ubuntu/Alpine) if that is what you +need. In this guide, we will explain what images exist and how to build them locally. -Before we can start, we need to meet a few prerequisites in our local development environment. +Before we can start, we need to meet a few prerequisites in our local development +environment. -#. Clone the ``flower`` repository. +1. Clone the ``flower`` repository. - .. code-block:: bash + .. 
code-block:: bash - $ git clone --depth=1 https://github.com/adap/flower.git && cd flower + $ git clone --depth=1 https://github.com/adap/flower.git && cd flower -#. Verify the Docker daemon is running. +2. Verify the Docker daemon is running. - The build instructions that assemble the images are located in the respective Dockerfiles. You - can find them in the subdirectories of ``src/docker``. + The build instructions that assemble the images are located in the respective + Dockerfiles. You can find them in the subdirectories of ``src/docker``. - Flower Docker images are configured via build arguments. Through build arguments, we can make the - creation of images more flexible. For example, in the base image, we can specify the version of - Python to install using the ``PYTHON_VERSION`` build argument. Some of the build arguments have - default values, others must be specified when building the image. All available build arguments for - each image are listed in one of the tables below. + Flower Docker images are configured via build arguments. Through build arguments, we + can make the creation of images more flexible. For example, in the base image, we can + specify the version of Python to install using the ``PYTHON_VERSION`` build argument. + Some of the build arguments have default values, others must be specified when + building the image. All available build arguments for each image are listed in one of + the tables below. Building the Base Image ----------------------- .. list-table:: - :widths: 25 45 15 15 - :header-rows: 1 - - * - Build argument - - Description - - Required - - Example - * - ``DISTRO`` - - The Linux distribution to use as the base image. - - No - - ``ubuntu`` - * - ``DISTRO_VERSION`` - - Version of the Linux distribution. - - No - - :substitution-code:`|ubuntu_version|` - * - ``PYTHON_VERSION`` - - Version of ``python`` to be installed. - - No - - ``3.11`` or ``3.11.1`` - * - ``PIP_VERSION`` - - Version of ``pip`` to be installed. 
- - Yes - - :substitution-code:`|pip_version|` - * - ``SETUPTOOLS_VERSION`` - - Version of ``setuptools`` to be installed. - - Yes - - :substitution-code:`|setuptools_version|` - * - ``FLWR_VERSION`` - - Version of Flower to be installed. - - Yes - - :substitution-code:`|stable_flwr_version|` - * - ``FLWR_PACKAGE`` - - The Flower package to be installed. - - No - - ``flwr`` or ``flwr-nightly`` - * - ``FLWR_VERSION_REF`` - - A `direct reference `_ without the ``@`` specifier. If both ``FLWR_VERSION`` and ``FLWR_VERSION_REF`` are specified, the ``FLWR_VERSION_REF`` has precedence. - - No - - `Direct Reference Examples`_ - -The following example creates a base Ubuntu/Alpine image with Python ``3.11.0``, -pip :substitution-code:`|pip_version|`, setuptools :substitution-code:`|setuptools_version|` + :widths: 25 45 15 15 + :header-rows: 1 + + - - Build argument + - Description + - Required + - Example + - - ``DISTRO`` + - The Linux distribution to use as the base image. + - No + - ``ubuntu`` + - - ``DISTRO_VERSION`` + - Version of the Linux distribution. + - No + - :substitution-code:`|ubuntu_version|` + - - ``PYTHON_VERSION`` + - Version of ``python`` to be installed. + - No + - ``3.11`` or ``3.11.1`` + - - ``PIP_VERSION`` + - Version of ``pip`` to be installed. + - Yes + - :substitution-code:`|pip_version|` + - - ``SETUPTOOLS_VERSION`` + - Version of ``setuptools`` to be installed. + - Yes + - :substitution-code:`|setuptools_version|` + - - ``FLWR_VERSION`` + - Version of Flower to be installed. + - Yes + - :substitution-code:`|stable_flwr_version|` + - - ``FLWR_PACKAGE`` + - The Flower package to be installed. + - No + - ``flwr`` or ``flwr-nightly`` + - - ``FLWR_VERSION_REF`` + - A `direct reference + `_ + without the ``@`` specifier. If both ``FLWR_VERSION`` and ``FLWR_VERSION_REF`` + are specified, the ``FLWR_VERSION_REF`` has precedence. 
+ - No + - `Direct Reference Examples`_ + +The following example creates a base Ubuntu/Alpine image with Python ``3.11.0``, pip +:substitution-code:`|pip_version|`, setuptools :substitution-code:`|setuptools_version|` and Flower :substitution-code:`|stable_flwr_version|`: .. code-block:: bash - :substitutions: + :substitutions: - $ cd src/docker/base/ - $ docker build \ - --build-arg PYTHON_VERSION=3.11.0 \ - --build-arg FLWR_VERSION=|stable_flwr_version| \ - --build-arg PIP_VERSION=|pip_version| \ - --build-arg SETUPTOOLS_VERSION=|setuptools_version| \ - -t flwr_base:0.1.0 . + $ cd src/docker/base/ + $ docker build \ + --build-arg PYTHON_VERSION=3.11.0 \ + --build-arg FLWR_VERSION=|stable_flwr_version| \ + --build-arg PIP_VERSION=|pip_version| \ + --build-arg SETUPTOOLS_VERSION=|setuptools_version| \ + -t flwr_base:0.1.0 . -In this example, we specify our image name as ``flwr_base`` and the tag as ``0.1.0``. Remember that the build arguments as well -as the name and tag can be adapted to your needs. These values serve as examples only. +In this example, we specify our image name as ``flwr_base`` and the tag as ``0.1.0``. +Remember that the build arguments as well as the name and tag can be adapted to your +needs. These values serve as examples only. Building a Flower Binary Image ------------------------------ .. list-table:: - :widths: 25 45 15 15 - :header-rows: 1 - - * - Build argument - - Description - - Required - - Example - * - ``BASE_REPOSITORY`` - - The repository name of the base image. - - No - - ``flwr/base`` - * - ``BASE_IMAGE`` - - The Tag of the Flower base image. - - Yes - - :substitution-code:`|stable_flwr_version|-py3.11-ubuntu|ubuntu_version|` - -For example, to build a SuperLink image with the latest Flower version, Python 3.11 and Ubuntu 22.04, run the following: + :widths: 25 45 15 15 + :header-rows: 1 + + - - Build argument + - Description + - Required + - Example + - - ``BASE_REPOSITORY`` + - The repository name of the base image. 
+ - No + - ``flwr/base`` + - - ``BASE_IMAGE`` + - The Tag of the Flower base image. + - Yes + - :substitution-code:`|stable_flwr_version|-py3.11-ubuntu|ubuntu_version|` + +For example, to build a SuperLink image with the latest Flower version, Python 3.11 and +Ubuntu 22.04, run the following: .. code-block:: bash - :substitutions: + :substitutions: - $ cd src/docker/superlink - $ docker build \ - --build-arg BASE_IMAGE=|stable_flwr_version|-py3.11-ubuntu22.04 \ - -t flwr_superlink:0.1.0 . + $ cd src/docker/superlink + $ docker build \ + --build-arg BASE_IMAGE=|stable_flwr_version|-py3.11-ubuntu22.04 \ + -t flwr_superlink:0.1.0 . -If you want to use your own base image instead of the official Flower base image, all you need to do -is set the ``BASE_REPOSITORY`` build argument to ``flwr_base`` (as we've specified above). +If you want to use your own base image instead of the official Flower base image, all +you need to do is set the ``BASE_REPOSITORY`` build argument to ``flwr_base`` (as we've +specified above). .. code-block:: bash - $ cd src/docker/superlink/ - $ docker build \ - --build-arg BASE_REPOSITORY=flwr_base \ - --build-arg BASE_IMAGE=0.1.0 - -t flwr_superlink:0.1.0 . + $ cd src/docker/superlink/ + $ docker build \ + --build-arg BASE_REPOSITORY=flwr_base \ + --build-arg BASE_IMAGE=0.1.0 + -t flwr_superlink:0.1.0 . After creating the image, we can test whether the image is working: .. code-block:: bash - $ docker run --rm flwr_superlink:0.1.0 --help + $ docker run --rm flwr_superlink:0.1.0 --help Direct Reference Examples ------------------------- .. 
code-block:: bash - :substitutions: + :substitutions: - # main branch - git+https://github.com/adap/flower.git@main + # main branch + git+https://github.com/adap/flower.git@main - # commit hash - git+https://github.com/adap/flower.git@1187c707f1894924bfa693d99611cf6f93431835 + # commit hash + git+https://github.com/adap/flower.git@1187c707f1894924bfa693d99611cf6f93431835 - # tag - git+https://github.com/adap/flower.git@|stable_flwr_version| + # tag + git+https://github.com/adap/flower.git@|stable_flwr_version| - # artifact store - https://artifact.flower.ai/py/main/latest/flwr-|stable_flwr_version|-py3-none-any.whl + # artifact store + https://artifact.flower.ai/py/main/latest/flwr-|stable_flwr_version|-py3-none-any.whl diff --git a/doc/source/contributor-how-to-contribute-translations.rst b/doc/source/contributor-how-to-contribute-translations.rst index ba59901cf1c4..5fff62833b0e 100644 --- a/doc/source/contributor-how-to-contribute-translations.rst +++ b/doc/source/contributor-how-to-contribute-translations.rst @@ -2,70 +2,67 @@ Contribute translations ======================= Since `Flower 1.5 -`_ we -have introduced translations to our doc pages, but, as you might have noticed, -the translations are often imperfect. If you speak languages other than -English, you might be able to help us in our effort to make Federated Learning -accessible to as many people as possible by contributing to those translations! -This might also be a great opportunity for those wanting to become open source -contributors with little prerequisites. +`_ we have +introduced translations to our doc pages, but, as you might have noticed, the +translations are often imperfect. If you speak languages other than English, you might +be able to help us in our effort to make Federated Learning accessible to as many people +as possible by contributing to those translations! This might also be a great +opportunity for those wanting to become open source contributors with little +prerequisites. 
Our translation project is publicly available over on `Weblate -`_, this where most -of the work will happen. +`_, this is where most of the +work will happen. Contribute to existing languages -------------------------------- .. youtube:: 10_Xfy5BOfQ - :width: 100% + :width: 100% -The first thing you will need to do in order to contribute is to create a -free Weblate account on this `page -`_. More information -about profile settings can be found `here +The first thing you will need to do in order to contribute is to create a free Weblate +account on this `page `_. More +information about profile settings can be found `here `_. -Once you are signed in to Weblate, you can navigate to the `Flower Framework -project `_. Here, -you should see the different existing languages that can be found on the -website. +Once you are signed in to Weblate, you can navigate to the `Flower Framework project +`_. Here, you should see the +different existing languages that can be found on the website. -Once you have selected the language you want to contribute to, you should see a -similar interface to this: +Once you have selected the language you want to contribute to, you should see a similar +interface to this: - .. image:: _static/weblate_status.png + .. image:: _static/weblate_status.png -The most straight forward option here is to click on the ``Translate`` button -on the top right (in the ``Translation status`` section). This will -automatically bring you to the translation interface for untranslated strings. +The most straightforward option here is to click on the ``Translate`` button on the top +right (in the ``Translation status`` section). This will automatically bring you to the +translation interface for untranslated strings. This is what the interface looks like: - .. image:: _static/weblate_interface.png + ..
image:: _static/weblate_interface.png -You input your translation in the text box at the top and then, once you are -happy with it, you either press ``Save and continue`` (to save the translation -and go to the next untranslated string), ``Save and stay`` (to save the -translation and stay on the same page), ``Suggest`` (to add your translation to -suggestions for other users to view), or ``Skip`` (to go to the next -untranslated string without saving anything). +You input your translation in the text box at the top and then, once you are happy with +it, you either press ``Save and continue`` (to save the translation and go to the next +untranslated string), ``Save and stay`` (to save the translation and stay on the same +page), ``Suggest`` (to add your translation to suggestions for other users to view), or +``Skip`` (to go to the next untranslated string without saving anything). In order to help with the translations, you can see on the bottom the ``Nearby -strings``, the ``Comments`` (from other contributors), the ``Automatic -suggestions`` (from machine translation engines), the translations in ``Other -languages``, and the ``History`` of translations for this string. +strings``, the ``Comments`` (from other contributors), the ``Automatic suggestions`` +(from machine translation engines), the translations in ``Other languages``, and the +``History`` of translations for this string. -On the right, under the ``String information`` section, you can also click the -link under ``Source string location`` in order to view the source of the doc -file containing the string. +On the right, under the ``String information`` section, you can also click the link +under ``Source string location`` in order to view the source of the doc file containing +the string. -For more information about translating using Weblate, you can check out this -`in-depth guide `_. +For more information about translating using Weblate, you can check out this `in-depth +guide `_. 
Add new languages ----------------- -If you want to add a new language, you will first have to contact us, either on -`Slack `_, or by opening an issue on our `GitHub -repo `_. +If you want to add a new language, you will first have to contact us, either on `Slack +`_, or by opening an issue on our `GitHub repo +`_. diff --git a/doc/source/contributor-how-to-develop-in-vscode-dev-containers.rst b/doc/source/contributor-how-to-develop-in-vscode-dev-containers.rst index c861457b6edc..79f52f8d8f6f 100644 --- a/doc/source/contributor-how-to-develop-in-vscode-dev-containers.rst +++ b/doc/source/contributor-how-to-develop-in-vscode-dev-containers.rst @@ -1,24 +1,47 @@ Develop in VSCode Dev Containers ================================ -When working on the Flower framework we want to ensure that all contributors use the same developer environment to format code or run tests. For this purpose we are using the VSCode Remote Containers extension. What is it? Read the following quote: - - - The Visual Studio Code Remote - Containers extension lets you use a Docker container as a fully-featured development environment. It allows you to open any folder inside (or mounted into) a container and take advantage of Visual Studio Code's full feature set. A :code:`devcontainer.json` file in your project tells VS Code how to access (or create) a development container with a well-defined tool and runtime stack. This container can be used to run an application or to separate tools, libraries, or runtimes needed for working with a codebase. - - Workspace files are mounted from the local file system or copied or cloned into the container. Extensions are installed and run inside the container, where they have full access to the tools, platform, and file system. This means that you can seamlessly switch your entire development environment just by connecting to a different container. 
- -Source: `Official VSCode documentation `_ - +When working on the Flower framework we want to ensure that all contributors use the +same developer environment to format code or run tests. For this purpose we are using +the VSCode Remote Containers extension. What is it? Read the following quote: + + The Visual Studio Code Remote - Containers extension lets you use a Docker container + as a fully-featured development environment. It allows you to open any folder inside + (or mounted into) a container and take advantage of Visual Studio Code's full + feature set. A ``devcontainer.json`` file in your project tells VS Code how to + access (or create) a development container with a well-defined tool and runtime + stack. This container can be used to run an application or to separate tools, + libraries, or runtimes needed for working with a codebase. + + Workspace files are mounted from the local file system or copied or cloned into the + container. Extensions are installed and run inside the container, where they have + full access to the tools, platform, and file system. This means that you can + seamlessly switch your entire development environment just by connecting to a + different container. + +Source: `Official VSCode documentation +`_ Getting started --------------- -Configuring and setting up the :code:`Dockerfile` as well the configuration for the devcontainer can be a bit more involved. The good thing is you don't have to do it. Usually it should be enough to install `Docker `_ on your system and ensure its available on your command line. Additionally, install the `VSCode Containers Extension `_. - -Now you should be good to go. When starting VSCode, it will ask you to run in the container environment and - if you confirm - automatically build the container and use it. 
To manually instruct VSCode to use the devcontainer, you can, after installing the extension, click the green area in the bottom left corner of your VSCode window and select the option *(Re)Open Folder in Container*. - -In some cases your setup might be more involved. For those cases consult the following sources: - -* `Developing inside a Container `_ -* `Remote development in Containers `_ +Configuring and setting up the ``Dockerfile`` as well as the configuration for the +devcontainer can be a bit more involved. The good thing is you don't have to do it. +Usually it should be enough to install `Docker +`_ on your system and ensure it's available on +your command line. Additionally, install the `VSCode Containers Extension +`_. + +Now you should be good to go. When starting VSCode, it will ask you to run in the +container environment and - if you confirm - automatically build the container and use +it. To manually instruct VSCode to use the devcontainer, you can, after installing the +extension, click the green area in the bottom left corner of your VSCode window and +select the option *(Re)Open Folder in Container*. + +In some cases your setup might be more involved. For those cases consult the following +sources: + +- `Developing inside a Container + `_ +- `Remote development in Containers + `_ diff --git a/doc/source/contributor-how-to-install-development-versions.rst b/doc/source/contributor-how-to-install-development-versions.rst index 0f0773c85e73..61c123a24309 100644 --- a/doc/source/contributor-how-to-install-development-versions.rst +++ b/doc/source/contributor-how-to-install-development-versions.rst @@ -7,10 +7,13 @@ Install development versions of Flower Using Poetry (recommended) ~~~~~~~~~~~~~~~~~~~~~~~~~~ -Install a ``flwr`` pre-release from PyPI: update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall (don't forget to delete ``poetry.lock`` (``rm poetry.lock``) before running ``poetry install``).
+Install a ``flwr`` pre-release from PyPI: update the ``flwr`` dependency in +``pyproject.toml`` and then reinstall (don't forget to delete ``poetry.lock`` (``rm +poetry.lock``) before running ``poetry install``). - ``flwr = { version = "1.0.0a0", allow-prereleases = true }`` (without extras) -- ``flwr = { version = "1.0.0a0", allow-prereleases = true, extras = ["simulation"] }`` (with extras) +- ``flwr = { version = "1.0.0a0", allow-prereleases = true, extras = ["simulation"] }`` + (with extras) Install ``flwr`` from a local copy of the Flower source code via ``pyproject.toml``: @@ -20,9 +23,11 @@ Install ``flwr`` from a local copy of the Flower source code via ``pyproject.tom Install ``flwr`` from a local wheel file via ``pyproject.toml``: - ``flwr = { path = "../../dist/flwr-1.8.0-py3-none-any.whl" }`` (without extras) -- ``flwr = { path = "../../dist/flwr-1.8.0-py3-none-any.whl", extras = ["simulation"] }`` (with extras) +- ``flwr = { path = "../../dist/flwr-1.8.0-py3-none-any.whl", extras = ["simulation"] + }`` (with extras) -Please refer to the Poetry documentation for further details: `Poetry Dependency Specification `_ +Please refer to the Poetry documentation for further details: `Poetry Dependency +Specification `_ Using pip (recommended on Colab) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -32,18 +37,21 @@ Install a ``flwr`` pre-release from PyPI: - ``pip install -U --pre flwr`` (without extras) - ``pip install -U --pre 'flwr[simulation]'`` (with extras) -Python packages can be installed from git repositories. Use one of the following commands to install the Flower directly from GitHub. +Python packages can be installed from git repositories. Use one of the following +commands to install the Flower directly from GitHub. 
Install ``flwr`` from the default GitHub branch (``main``): - ``pip install flwr@git+https://github.com/adap/flower.git`` (without extras) -- ``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'`` (with extras) +- ``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'`` (with + extras) Install ``flwr`` from a specific GitHub branch (``branch-name``): -- ``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` (without extras) -- ``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git@branch-name'`` (with extras) - +- ``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` (without + extras) +- ``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git@branch-name'`` + (with extras) Open Jupyter Notebooks on Google Colab -------------------------------------- @@ -52,12 +60,15 @@ Open the notebook ``doc/source/tutorial-series-get-started-with-flower-pytorch.i - https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb -Open a development version of the same notebook from branch `branch-name` by changing ``main`` to ``branch-name`` (right after ``blob``): +Open a development version of the same notebook from branch `branch-name` by changing +``main`` to ``branch-name`` (right after ``blob``): - https://colab.research.google.com/github/adap/flower/blob/branch-name/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb Install a `whl` on Google Colab: -1. In the vertical icon grid on the left hand side, select ``Files`` > ``Upload to session storage`` +1. In the vertical icon grid on the left hand side, select ``Files`` > ``Upload to + session storage`` 2. Upload the whl (e.g., ``flwr-1.8.0-py3-none-any.whl``) -3. Change ``!pip install -q 'flwr[simulation]' torch torchvision matplotlib`` to ``!pip install -q 'flwr-1.8.0-py3-none-any.whl[simulation]' torch torchvision matplotlib`` +3. 
Change ``!pip install -q 'flwr[simulation]' torch torchvision matplotlib`` to ``!pip + install -q 'flwr-1.8.0-py3-none-any.whl[simulation]' torch torchvision matplotlib`` diff --git a/doc/source/contributor-how-to-release-flower.rst b/doc/source/contributor-how-to-release-flower.rst index bbc0386feeef..b9f92efeb46b 100644 --- a/doc/source/contributor-how-to-release-flower.rst +++ b/doc/source/contributor-how-to-release-flower.rst @@ -1,16 +1,28 @@ Release Flower ============== -This document describes the current release process. It may or may not change in the future. +This document describes the current release process. It may or may not change in the +future. During the release ------------------ -The version number of a release is stated in ``pyproject.toml``. To release a new version of Flower, the following things need to happen (in that order): - -1. Run ``python3 ./dev/update_changelog.py `` in order to add every new change to the changelog (feel free to make manual changes to the changelog afterwards until it looks good). -2. Once the changelog has been updated with all the changes, open a pull request. -3. Once the pull request is merged, tag the release commit with the version number as soon as the PR is merged: ``git tag v`` (notice the ``v`` added before the version number), then ``git push --tags``. This will create a draft release on GitHub containing the correct artifacts and the relevant part of the changelog. +The version number of a release is stated in ``pyproject.toml``. To release a new +version of Flower, the following things need to happen (in that order): + +1. Run ``python3 ./dev/update_changelog.py `` in order to add + every new change to the changelog (feel free to make manual changes to the changelog + afterwards until it looks good). +2. Once the changelog has been updated with all the changes, run + ``./dev/prepare-release-changelog.sh v``, where ```` is the + version stated in ``pyproject.toml`` (notice the ``v`` added before it). 
This will + replace the ``Unreleased`` header of the changelog by the version and current date, + and it will add a thanking message for the contributors. Open a pull request with + those changes. +3. Once the pull request is merged, tag the release commit with the version number as + soon as the PR is merged: ``git tag v`` (notice the ``v`` added before + the version number), then ``git push --tags``. This will create a draft release on + GitHub containing the correct artifacts and the relevant part of the changelog. 4. Check the draft release on GitHub, and if everything is good, publish it. After the release @@ -22,7 +34,8 @@ Create a pull request which contains the following changes: 2. Update all files which contain the current version number if necessary. 3. Add a new ``Unreleased`` section in ``changelog.md``. -Merge the pull request on the same day (i.e., before a new nightly release gets published to PyPI). +Merge the pull request on the same day (i.e., before a new nightly release gets +published to PyPI). Publishing a pre-release ------------------------ @@ -30,7 +43,8 @@ Publishing a pre-release Pre-release naming ~~~~~~~~~~~~~~~~~~ -PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases MUST use one of the following naming patterns: +PyPI supports pre-releases (alpha, beta, release candidate). 
Pre-releases MUST use one +of the following naming patterns: - Alpha: ``MAJOR.MINOR.PATCHaN`` - Beta: ``MAJOR.MINOR.PATCHbN`` @@ -43,19 +57,25 @@ Examples include: - ``1.0.0rc0`` - ``1.0.0rc1`` -This is in line with PEP-440 and the recommendations from the Python Packaging -Authority (PyPA): +This is in line with PEP-440 and the recommendations from the Python Packaging Authority +(PyPA): - `PEP-440 `_ -- `PyPA Choosing a versioning scheme `_ +- `PyPA Choosing a versioning scheme + `_ -Note that the approach defined by PyPA is not compatible with SemVer 2.0.0 spec, for details consult the `Semantic Versioning Specification `_ (specifically item 11 on precedence). +Note that the approach defined by PyPA is not compatible with SemVer 2.0.0 spec, for +details consult the `Semantic Versioning Specification +`_ (specifically item 11 on +precedence). Pre-release classification ~~~~~~~~~~~~~~~~~~~~~~~~~~ Should the next pre-release be called alpha, beta, or release candidate? -- RC: feature complete, no known issues (apart from issues that are classified as "won't fix" for the next stable release) - if no issues surface this will become the next stable release +- RC: feature complete, no known issues (apart from issues that are classified as "won't + fix" for the next stable release) - if no issues surface this will become the next + stable release - Beta: feature complete, allowed to have known issues - Alpha: not feature complete, allowed to have known issues diff --git a/doc/source/contributor-how-to-set-up-a-virtual-env.rst b/doc/source/contributor-how-to-set-up-a-virtual-env.rst index a844298fdca9..7e54ed64c9c9 100644 --- a/doc/source/contributor-how-to-set-up-a-virtual-env.rst +++ b/doc/source/contributor-how-to-set-up-a-virtual-env.rst @@ -1,26 +1,33 @@ Set up a virtual env ==================== -It is recommended to run your Python setup within a virtual environment. 
-This guide shows three different examples how to create a virtual environment with pyenv virtualenv, poetry, or Anaconda. -You can follow the instructions or choose your preferred setup. +It is recommended to run your Python setup within a virtual environment. This guide +shows three different examples of how to create a virtual environment with pyenv +virtualenv, poetry, or Anaconda. You can follow the instructions or choose your +preferred setup. Python Version -------------- -Flower requires at least `Python 3.9 `_, but `Python 3.10 `_ or above is recommended. +Flower requires at least `Python 3.9 `_, but `Python 3.10 +`_ or above is recommended. .. note:: - Due to a known incompatibility with `ray `_, - we currently recommend utilizing at most `Python 3.11 `_ for - running Flower simulations. + + Due to a known incompatibility with `ray `_, we + currently recommend utilizing at most `Python 3.11 `_ + for running Flower simulations. Virtualenv with Pyenv/Virtualenv -------------------------------- -One of the recommended virtual environment is `pyenv `_/`virtualenv `_. Please see `Flower examples `_ for details. +One of the recommended virtual environments is `pyenv +`_/`virtualenv +`_. Please see `Flower examples +`_ for details. -Once Pyenv is set up, you can use it to install `Python Version 3.10 `_ or above: +Once Pyenv is set up, you can use it to install `Python Version 3.10 +`_ or above: .. code-block:: shell @@ -32,34 +39,35 @@ Create the virtualenv with: pyenv virtualenv 3.10.12 flower-3.10.12 - Activate the virtualenv by running the following command: .. code-block:: shell echo flower-3.10.12 > .python-version - Virtualenv with Poetry ---------------------- -The Flower examples are based on `Poetry `_ to manage dependencies. After installing Poetry you simply create a virtual environment with: +The Flower examples are based on `Poetry `_ to manage +dependencies. After installing Poetry you simply create a virtual environment with: ..
code-block:: shell poetry shell -If you open a new terminal you can activate the previously created virtual environment with the following command: +If you open a new terminal you can activate the previously created virtual environment +with the following command: .. code-block:: shell source $(poetry env info --path)/bin/activate - Virtualenv with Anaconda ------------------------ -If you prefer to use Anaconda for your virtual environment then install and setup the `conda `_ package. After setting it up you can create a virtual environment with: +If you prefer to use Anaconda for your virtual environment then install and setup the +`conda `_ +package. After setting it up you can create a virtual environment with: .. code-block:: shell @@ -71,8 +79,8 @@ and activate the virtual environment with: conda activate flower-3.10.12 - And then? --------- -As soon as you created your virtual environment you clone one of the `Flower examples `_. +As soon as you created your virtual environment you clone one of the `Flower examples +`_. diff --git a/doc/source/contributor-how-to-write-documentation.rst b/doc/source/contributor-how-to-write-documentation.rst index fcd8c5bb18c6..6209530b71e0 100644 --- a/doc/source/contributor-how-to-write-documentation.rst +++ b/doc/source/contributor-how-to-write-documentation.rst @@ -1,14 +1,15 @@ Write documentation =================== - Project layout -------------- -The Flower documentation lives in the ``doc`` directory. The Sphinx-based documentation system supports both reStructuredText (``.rst`` files) and Markdown (``.md`` files). - -Note that, in order to build the documentation locally (with ``poetry run make html``, like described below), `Pandoc `_ needs to be installed on the system. +The Flower documentation lives in the ``doc`` directory. The Sphinx-based documentation +system supports both reStructuredText (``.rst`` files) and Markdown (``.md`` files). 
+Note that, in order to build the documentation locally (with ``poetry run make html``, +like described below), `Pandoc `_ needs to be +installed on the system. Edit an existing page --------------------- @@ -17,7 +18,6 @@ Edit an existing page 2. Compile the docs: ``cd doc``, then ``poetry run make html`` 3. Open ``doc/build/html/index.html`` in the browser to check the result - Create a new page ----------------- diff --git a/doc/source/contributor-ref-good-first-contributions.rst b/doc/source/contributor-ref-good-first-contributions.rst index 2b8ce88413f5..a715e006f905 100644 --- a/doc/source/contributor-ref-good-first-contributions.rst +++ b/doc/source/contributor-ref-good-first-contributions.rst @@ -1,41 +1,41 @@ Good first contributions ======================== -We welcome contributions to Flower! However, it is not always easy to know -where to start. We therefore put together a few recommendations on where to -start to increase your chances of getting your PR accepted into the Flower -codebase. - +We welcome contributions to Flower! However, it is not always easy to know where to +start. We therefore put together a few recommendations on where to start to increase +your chances of getting your PR accepted into the Flower codebase. Where to start -------------- -Until the Flower core library matures it will be easier to get PR's accepted if -they only touch non-core areas of the codebase. Good candidates to get started -are: +Until the Flower core library matures it will be easier to get PR's accepted if they +only touch non-core areas of the codebase. Good candidates to get started are: - Documentation: What's missing? What could be expressed more clearly? - Baselines: See below. - Examples: See below. - Request for Flower Baselines ---------------------------- -If you are not familiar with Flower Baselines, you should probably check-out our `contributing guide for baselines `_. 
+If you are not familiar with Flower Baselines, you should probably check-out our +`contributing guide for baselines +`_. -You should then check out the open -`issues `_ for baseline requests. -If you find a baseline that you'd like to work on and that has no assignees, feel free to assign it to yourself and start working on it! +You should then check out the open `issues +`_ +for baseline requests. If you find a baseline that you'd like to work on and that has no +assignees, feel free to assign it to yourself and start working on it! -Otherwise, if you don't find a baseline you'd like to work on, be sure to open a new issue with the baseline request template! +Otherwise, if you don't find a baseline you'd like to work on, be sure to open a new +issue with the baseline request template! Request for examples -------------------- -We wish we had more time to write usage examples because we believe they help -users to get started with building what they want to build. Here are a few -ideas where we'd be happy to accept a PR: +We wish we had more time to write usage examples because we believe they help users to +get started with building what they want to build. Here are a few ideas where we'd be +happy to accept a PR: - Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch - XGBoost diff --git a/doc/source/contributor-ref-secure-aggregation-protocols.rst b/doc/source/contributor-ref-secure-aggregation-protocols.rst index 7107d04b8cd0..347cb2724424 100644 --- a/doc/source/contributor-ref-secure-aggregation-protocols.rst +++ b/doc/source/contributor-ref-secure-aggregation-protocols.rst @@ -1,13 +1,16 @@ Secure Aggregation Protocols ============================ -Include SecAgg, SecAgg+, and LightSecAgg protocol. The LightSecAgg protocol has not been implemented yet, so its diagram and abstraction may not be accurate in practice. -The SecAgg protocol can be considered as a special case of the SecAgg+ protocol. +Include SecAgg, SecAgg+, and LightSecAgg protocol. 
The LightSecAgg protocol has not been +implemented yet, so its diagram and abstraction may not be accurate in practice. The +SecAgg protocol can be considered as a special case of the SecAgg+ protocol. -The :code:`SecAgg+` abstraction -------------------------------- +The ``SecAgg+`` abstraction +--------------------------- -In this implementation, each client will be assigned with a unique index (int) for secure aggregation, and thus many python dictionaries used have keys of int type rather than ClientProxy type. +In this implementation, each client will be assigned with a unique index (int) for +secure aggregation, and thus many python dictionaries used have keys of int type rather +than ClientProxy type. .. code-block:: python @@ -15,9 +18,7 @@ In this implementation, each client will be assigned with a unique index (int) f """Abstract base class for the SecAgg+ protocol implementations.""" @abstractmethod - def generate_graph( - self, clients: List[ClientProxy], k: int - ) -> ClientGraph: + def generate_graph(self, clients: List[ClientProxy], k: int) -> ClientGraph: """Build a k-degree undirected graph of clients. Each client will only generate pair-wise masks with its k neighbours. k is equal to the number of clients in SecAgg, i.e., a complete graph. @@ -31,16 +32,16 @@ In this implementation, each client will be assigned with a unique index (int) f @abstractmethod def ask_keys( - self, - clients: List[ClientProxy], ask_keys_ins_list: List[AskKeysIns] + self, clients: List[ClientProxy], ask_keys_ins_list: List[AskKeysIns] ) -> AskKeysResultsAndFailures: """Ask public keys. 
(AskKeysIns is an empty class, and hence ask_keys_ins_list can be omitted.)""" @abstractmethod def share_keys( self, - clients: List[ClientProxy], public_keys_dict: Dict[int, AskKeysRes], - graph: ClientGraph + clients: List[ClientProxy], + public_keys_dict: Dict[int, AskKeysRes], + graph: ClientGraph, ) -> ShareKeysResultsAndFailures: """Send public keys.""" @@ -48,17 +49,18 @@ In this implementation, each client will be assigned with a unique index (int) f def ask_vectors( clients: List[ClientProxy], forward_packet_list_dict: Dict[int, List[ShareKeysPacket]], - client_instructions=None: Dict[int, FitIns] + client_instructions: Dict[int, FitIns] = None, ) -> AskVectorsResultsAndFailures: """Ask vectors of local model parameters. (If client_instructions is not None, local models will be trained in the ask vectors stage, - rather than trained parallelly as the protocol goes through the previous stages.)""" + rather than trained parallelly as the protocol goes through the previous stages.) + """ @abstractmethod def unmask_vectors( clients: List[ClientProxy], dropout_clients: List[ClientProxy], - graph: ClientGraph + graph: ClientGraph, ) -> UnmaskVectorsResultsAndFailures: """Unmask and compute the aggregated model. UnmaskVectorRes contains shares of keys needed to generate masks.""" @@ -155,10 +157,12 @@ The Flower server will execute and process received results in the following ord deactivate P end -The :code:`LightSecAgg` abstraction ------------------------------------ +The ``LightSecAgg`` abstraction +------------------------------- -In this implementation, each client will be assigned with a unique index (int) for secure aggregation, and thus many python dictionaries used have keys of int type rather than ClientProxy type. +In this implementation, each client will be assigned with a unique index (int) for +secure aggregation, and thus many python dictionaries used have keys of int type rather +than ClientProxy type. .. 
code-block:: python @@ -174,7 +178,8 @@ In this implementation, each client will be assigned with a unique index (int) f @abstractmethod def ask_encrypted_encoded_masks( self, - clients: List[ClientProxy], public_keys_dict: Dict[int, LightSecAggSetupConfigRes] + clients: List[ClientProxy], + public_keys_dict: Dict[int, LightSecAggSetupConfigRes], ) -> AskEncryptedEncodedMasksResultsAndFailures: """Ask encrypted encoded masks. The protocol adopts Diffie-Hellman keys to build pair-wise secured channels to transfer encoded mask.""" @@ -183,15 +188,16 @@ In this implementation, each client will be assigned with a unique index (int) f self, clients: List[ClientProxy], forward_packet_list_dict: Dict[int, List[EncryptedEncodedMasksPacket]], - client_instructions=None: Dict[int, FitIns] + client_instructions: Dict[int, FitIns] = None, ) -> AskMaskedModelsResultsAndFailures: """Ask the masked local models. (If client_instructions is not None, local models will be trained in the ask vectors stage, - rather than trained parallelly as the protocol goes through the previous stages.)""" + rather than trained parallelly as the protocol goes through the previous stages.) + """ @abstractmethod def ask_aggregated_encoded_masks( - clients: List[ClientProxy] + clients: List[ClientProxy], ) -> AskAggregatedEncodedMasksResultsAndFailures: """Ask aggregated encoded masks""" @@ -272,158 +278,157 @@ Types .. 
code-block:: python - # the SecAgg+ protocol + # the SecAgg+ protocol + + ClientGraph = Dict[int, List[int]] - ClientGraph = Dict[int, List[int]] + SetupConfigResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, SetupConfigRes]], List[BaseException] + ] - SetupConfigResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, SetupConfigRes]], List[BaseException] - ] + AskKeysResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, AskKeysRes]], List[BaseException] + ] - AskKeysResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, AskKeysRes]], List[BaseException] - ] + ShareKeysResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, ShareKeysRes]], List[BaseException] + ] - ShareKeysResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, ShareKeysRes]], List[BaseException] - ] + AskVectorsResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, AskVectorsRes]], List[BaseException] + ] - AskVectorsResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, AskVectorsRes]], List[BaseException] - ] + UnmaskVectorsResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, UnmaskVectorsRes]], List[BaseException] + ] - UnmaskVectorsResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, UnmaskVectorsRes]], List[BaseException] - ] + FitResultsAndFailures = Tuple[List[Tuple[ClientProxy, FitRes]], List[BaseException]] - FitResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, FitRes]], List[BaseException] - ] + @dataclass + class SetupConfigIns: + sec_agg_cfg_dict: Dict[str, Scalar] - @dataclass - class SetupConfigIns: - sec_agg_cfg_dict: Dict[str, Scalar] + @dataclass + class SetupConfigRes: + pass - @dataclass - class SetupConfigRes: - pass + @dataclass + class AskKeysIns: + pass - @dataclass - class AskKeysIns: - pass + @dataclass + class AskKeysRes: + """Ask Keys Stage Response from client to server""" - @dataclass - class AskKeysRes: - """Ask Keys Stage Response from client to server""" - pk1: bytes - pk2: bytes + pk1: bytes + pk2: bytes - @dataclass - class ShareKeysIns: - public_keys_dict: 
Dict[int, AskKeysRes] + @dataclass + class ShareKeysIns: + public_keys_dict: Dict[int, AskKeysRes] - @dataclass - class ShareKeysPacket: - source: int - destination: int - ciphertext: bytes + @dataclass + class ShareKeysPacket: + source: int + destination: int + ciphertext: bytes - @dataclass - class ShareKeysRes: - share_keys_res_list: List[ShareKeysPacket] + @dataclass + class ShareKeysRes: + share_keys_res_list: List[ShareKeysPacket] - @dataclass - class AskVectorsIns: - ask_vectors_in_list: List[ShareKeysPacket] - fit_ins: FitIns + @dataclass + class AskVectorsIns: + ask_vectors_in_list: List[ShareKeysPacket] + fit_ins: FitIns - @dataclass - class AskVectorsRes: - parameters: Parameters + @dataclass + class AskVectorsRes: + parameters: Parameters - @dataclass - class UnmaskVectorsIns: - available_clients: List[int] - dropout_clients: List[int] + @dataclass + class UnmaskVectorsIns: + available_clients: List[int] + dropout_clients: List[int] - @dataclass - class UnmaskVectorsRes: - share_dict: Dict[int, bytes] + @dataclass + class UnmaskVectorsRes: + share_dict: Dict[int, bytes] - # the LightSecAgg protocol + # the LightSecAgg protocol - LightSecAggSetupConfigResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, LightSecAggSetupConfigRes]], List[BaseException] - ] + LightSecAggSetupConfigResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, LightSecAggSetupConfigRes]], List[BaseException] + ] - AskEncryptedEncodedMasksResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, AskEncryptedEncodedMasksRes]], List[BaseException] - ] + AskEncryptedEncodedMasksResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, AskEncryptedEncodedMasksRes]], List[BaseException] + ] - AskMaskedModelsResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, AskMaskedModelsRes]], List[BaseException] - ] + AskMaskedModelsResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, AskMaskedModelsRes]], List[BaseException] + ] - AskAggregatedEncodedMasksResultsAndFailures = Tuple[ - 
List[Tuple[ClientProxy, AskAggregatedEncodedMasksRes]], List[BaseException] - ] + AskAggregatedEncodedMasksResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, AskAggregatedEncodedMasksRes]], List[BaseException] + ] - @dataclass - class LightSecAggSetupConfigIns: - sec_agg_cfg_dict: Dict[str, Scalar] + @dataclass + class LightSecAggSetupConfigIns: + sec_agg_cfg_dict: Dict[str, Scalar] - @dataclass - class LightSecAggSetupConfigRes: - pk: bytes + @dataclass + class LightSecAggSetupConfigRes: + pk: bytes - @dataclass - class AskEncryptedEncodedMasksIns: - public_keys_dict: Dict[int, LightSecAggSetupConfigRes] + @dataclass + class AskEncryptedEncodedMasksIns: + public_keys_dict: Dict[int, LightSecAggSetupConfigRes] - @dataclass - class EncryptedEncodedMasksPacket: - source: int - destination: int - ciphertext: bytes + @dataclass + class EncryptedEncodedMasksPacket: + source: int + destination: int + ciphertext: bytes - @dataclass - class AskEncryptedEncodedMasksRes: - packet_list: List[EncryptedEncodedMasksPacket] + @dataclass + class AskEncryptedEncodedMasksRes: + packet_list: List[EncryptedEncodedMasksPacket] - @dataclass - class AskMaskedModelsIns: - packet_list: List[EncryptedEncodedMasksPacket] - fit_ins: FitIns + @dataclass + class AskMaskedModelsIns: + packet_list: List[EncryptedEncodedMasksPacket] + fit_ins: FitIns - @dataclass - class AskMaskedModelsRes: - parameters: Parameters + @dataclass + class AskMaskedModelsRes: + parameters: Parameters - @dataclass - class AskAggregatedEncodedMasksIns: - surviving_clients: List[int] + @dataclass + class AskAggregatedEncodedMasksIns: + surviving_clients: List[int] - @dataclass - class AskAggregatedEncodedMasksRes: - aggregated_encoded_mask: Parameters + @dataclass + class AskAggregatedEncodedMasksRes: + aggregated_encoded_mask: Parameters diff --git a/doc/source/contributor-tutorial-contribute-on-github.rst b/doc/source/contributor-tutorial-contribute-on-github.rst index 6970e7e8a580..22c6c6ef86b0 100644 --- 
a/doc/source/contributor-tutorial-contribute-on-github.rst +++ b/doc/source/contributor-tutorial-contribute-on-github.rst @@ -1,100 +1,113 @@ Contribute on GitHub ==================== -This guide is for people who want to get involved with Flower, but who are not used to contributing to GitHub projects. - -If you're familiar with how contributing on GitHub works, you can directly checkout our :doc:`getting started guide for contributors `. +This guide is for people who want to get involved with Flower, but who are not used to +contributing to GitHub projects. +If you're familiar with how contributing on GitHub works, you can directly check out our +:doc:`getting started guide for contributors +`. Setting up the repository ------------------------- 1. **Create a GitHub account and setup Git** - Git is a distributed version control tool. This allows for an entire codebase's history to be stored and every developer's machine. - It is a software that will need to be installed on your local machine, you can follow this `guide `_ to set it up. - - GitHub, itself, is a code hosting platform for version control and collaboration. It allows for everyone to collaborate and work from anywhere on remote repositories. - - If you haven't already, you will need to create an account on `GitHub `_. - - The idea behind the generic Git and GitHub workflow boils down to this: - you download code from a remote repository on GitHub, make changes locally and keep track of them using Git and then you upload your new history back to GitHub. - + Git is a distributed version control tool. This allows for an entire codebase's + history to be stored on every developer's machine. It is a software that will + need to be installed on your local machine, you can follow this `guide + `_ to + set it up. + + GitHub, itself, is a code hosting platform for version control and collaboration. + It allows for everyone to collaborate and work from anywhere on remote + repositories. 
+ + If you haven't already, you will need to create an account on `GitHub + `_. + + The idea behind the generic Git and GitHub workflow boils down to this: you + download code from a remote repository on GitHub, make changes locally and keep + track of them using Git and then you upload your new history back to GitHub. 2. **Forking the Flower repository** - A fork is a personal copy of a GitHub repository. To create one for Flower, you must navigate to ``_ (while connected to your GitHub account) - and click the ``Fork`` button situated on the top right of the page. - - .. image:: _static/fork_button.png + A fork is a personal copy of a GitHub repository. To create one for Flower, you + must navigate to https://github.com/adap/flower (while connected to your GitHub + account) and click the ``Fork`` button situated on the top right of the page. - You can change the name if you want, but this is not necessary as this version of Flower will be yours and will sit inside your own account (i.e., in your own list of repositories). - Once created, you should see on the top left corner that you are looking at your own version of Flower. + .. image:: _static/fork_button.png - .. image:: _static/fork_link.png + You can change the name if you want, but this is not necessary as this version of + Flower will be yours and will sit inside your own account (i.e., in your own list + of repositories). Once created, you should see on the top left corner that you + are looking at your own version of Flower. + .. image:: _static/fork_link.png 3. **Cloning your forked repository** - The next step is to download the forked repository on your machine to be able to make changes to it. - On your forked repository page, you should first click on the ``Code`` button on the right, - this will give you the ability to copy the HTTPS link of the repository. + The next step is to download the forked repository on your machine to be able to + make changes to it. 
On your forked repository page, you should first click on the + ``Code`` button on the right, this will give you the ability to copy the HTTPS + link of the repository. - .. image:: _static/cloning_fork.png + .. image:: _static/cloning_fork.png - Once you copied the \, you can open a terminal on your machine, navigate to the place you want to download the repository to and type: + Once you copied the \, you can open a terminal on your machine, navigate to + the place you want to download the repository to and type: - .. code-block:: shell + .. code-block:: shell - $ git clone - - This will create a ``flower/`` (or the name of your fork if you renamed it) folder in the current working directory. + $ git clone + This will create a ``flower/`` (or the name of your fork if you renamed it) + folder in the current working directory. 4. **Add origin** - You can then go into the repository folder: - - .. code-block:: shell - - $ cd flower + You can then go into the repository folder: - And here we will need to add an origin to our repository. The origin is the \ of the remote fork repository. - To obtain it, we can do as previously mentioned by going to our fork repository on our GitHub account and copying the link. + .. code-block:: shell - .. image:: _static/cloning_fork.png + $ cd flower - Once the \ is copied, we can type the following command in our terminal: + And here we will need to add an origin to our repository. The origin is the + \ of the remote fork repository. To obtain it, we can do as previously + mentioned by going to our fork repository on our GitHub account and copying the + link. - .. code-block:: shell + .. image:: _static/cloning_fork.png - $ git remote add origin + Once the \ is copied, we can type the following command in our terminal: + .. code-block:: shell + $ git remote add origin 5. **Add upstream** - Now we will add an upstream address to our repository. 
- Still in the same directory, we must run the following command: + Now we will add an upstream address to our repository. Still in the same + directory, we must run the following command: - .. code-block:: shell + .. code-block:: shell - $ git remote add upstream https://github.com/adap/flower.git + $ git remote add upstream https://github.com/adap/flower.git - The following diagram visually explains what we did in the previous steps: + The following diagram visually explains what we did in the previous steps: - .. image:: _static/github_schema.png + .. image:: _static/github_schema.png - The upstream is the GitHub remote address of the parent repository (in this case Flower), - i.e. the one we eventually want to contribute to and therefore need an up-to-date history of. - The origin is just the GitHub remote address of the forked repository we created, i.e. the copy (fork) in our own account. + The upstream is the GitHub remote address of the parent repository (in this case + Flower), i.e. the one we eventually want to contribute to and therefore need an + up-to-date history of. The origin is just the GitHub remote address of the forked + repository we created, i.e. the copy (fork) in our own account. - To make sure our local version of the fork is up-to-date with the latest changes from the Flower repository, - we can execute the following command: + To make sure our local version of the fork is up-to-date with the latest changes + from the Flower repository, we can execute the following command: - .. code-block:: shell - - $ git pull upstream main + .. code-block:: shell + $ git pull upstream main Setting up the coding environment --------------------------------- -This can be achieved by following this :doc:`getting started guide for contributors ` (note that you won't need to clone the repository). -Once you are able to write code and test it, you can finally start making changes! 
- +This can be achieved by following this :doc:`getting started guide for contributors +` (note that you won't need to clone +the repository). Once you are able to write code and test it, you can finally start +making changes! Making changes -------------- @@ -112,211 +125,233 @@ And with Flower's repository: $ git pull upstream main 1. **Create a new branch** - To make the history cleaner and easier to work with, it is good practice to - create a new branch for each feature/project that needs to be implemented. - - To do so, just run the following command inside the repository's directory: + To make the history cleaner and easier to work with, it is good practice to + create a new branch for each feature/project that needs to be implemented. - .. code-block:: shell + To do so, just run the following command inside the repository's directory: - $ git switch -c + .. code-block:: shell + $ git switch -c 2. **Make changes** - Write great code and create wonderful changes using your favorite editor! - + Write great code and create wonderful changes using your favorite editor! 3. **Test and format your code** - Don't forget to test and format your code! Otherwise your code won't be able to be merged into the Flower repository. - This is done so the codebase stays consistent and easy to understand. - - To do so, we have written a few scripts that you can execute: + Don't forget to test and format your code! Otherwise your code won't be able to + be merged into the Flower repository. This is done so the codebase stays + consistent and easy to understand. - .. code-block:: shell + To do so, we have written a few scripts that you can execute: - $ ./dev/format.sh # to format your code - $ ./dev/test.sh # to test that your code can be accepted - $ ./baselines/dev/format.sh # same as above but for code added to baselines - $ ./baselines/dev/test.sh # same as above but for code added to baselines + .. 
code-block:: shell + $ ./dev/format.sh # to format your code + $ ./dev/test.sh # to test that your code can be accepted + $ ./baselines/dev/format.sh # same as above but for code added to baselines + $ ./baselines/dev/test.sh # same as above but for code added to baselines 4. **Stage changes** - Before creating a commit that will update your history, you must specify to Git which files it needs to take into account. - - This can be done with: + Before creating a commit that will update your history, you must specify to Git + which files it needs to take into account. - .. code-block:: shell + This can be done with: - $ git add + .. code-block:: shell - To check which files have been modified compared to the last version (last commit) and to see which files are staged for commit, - you can use the :code:`git status` command. + $ git add + To check which files have been modified compared to the last version (last + commit) and to see which files are staged for commit, you can use the ``git + status`` command. 5. **Commit changes** - Once you have added all the files you wanted to commit using :code:`git add`, you can finally create your commit using this command: + Once you have added all the files you wanted to commit using ``git add``, you can + finally create your commit using this command: - .. code-block:: shell + .. code-block:: shell - $ git commit -m "" - - The \ is there to explain to others what the commit does. It should be written in an imperative style and be concise. - An example would be :code:`git commit -m "Add images to README"`. + $ git commit -m "" + The \ is there to explain to others what the commit does. It + should be written in an imperative style and be concise. An example would be + ``git commit -m "Add images to README"``. 6. 
**Push the changes to the fork** - Once we have committed our changes, we have effectively updated our local history, but GitHub has no way of knowing this unless we push - our changes to our origin's remote address: - - .. code-block:: shell + Once we have committed our changes, we have effectively updated our local + history, but GitHub has no way of knowing this unless we push our changes to our + origin's remote address: - $ git push -u origin + .. code-block:: shell - Once this is done, you will see on the GitHub that your forked repo was updated with the changes you have made. + $ git push -u origin + Once this is done, you will see on the GitHub that your forked repo was updated + with the changes you have made. Creating and merging a pull request (PR) ---------------------------------------- 1. **Create the PR** - Once you have pushed changes, on the GitHub webpage of your repository you should see the following message: - - .. image:: _static/compare_and_pr.png + Once you have pushed changes, on the GitHub webpage of your repository you should + see the following message: - Otherwise you can always find this option in the ``Branches`` page. + .. image:: _static/compare_and_pr.png - Once you click the ``Compare & pull request`` button, you should see something similar to this: + Otherwise you can always find this option in the ``Branches`` page. - .. image:: _static/creating_pr.png + Once you click the ``Compare & pull request`` button, you should see something + similar to this: - At the top you have an explanation of which branch will be merged where: + .. image:: _static/creating_pr.png - .. image:: _static/merging_branch.png + At the top you have an explanation of which branch will be merged where: - In this example you can see that the request is to merge the branch ``doc-fixes`` from my forked repository to branch ``main`` from the Flower repository. + .. 
image:: _static/merging_branch.png - The title should be changed to adhere to the :ref:`pr_title_format` guidelines, otherwise it won't be possible to merge the PR. So in this case, - a correct title might be ``docs(framework:skip) Fix typos``. + In this example you can see that the request is to merge the branch ``doc-fixes`` + from my forked repository to branch ``main`` from the Flower repository. - The input box in the middle is there for you to describe what your PR does and to link it to existing issues. - We have placed comments (that won't be rendered once the PR is opened) to guide you through the process. + The title should be changed to adhere to the :ref:`pr_title_format` guidelines, + otherwise it won't be possible to merge the PR. So in this case, a correct title + might be ``docs(framework:skip) Fix typos``. - It is important to follow the instructions described in comments. + The input box in the middle is there for you to describe what your PR does and to + link it to existing issues. We have placed comments (that won't be rendered once + the PR is opened) to guide you through the process. - At the bottom you will find the button to open the PR. This will notify reviewers that a new PR has been opened and - that they should look over it to merge or to request changes. + It is important to follow the instructions described in comments. - If your PR is not yet ready for review, and you don't want to notify anyone, you have the option to create a draft pull request: + At the bottom you will find the button to open the PR. This will notify reviewers + that a new PR has been opened and that they should look over it to merge or to + request changes. - .. image:: _static/draft_pr.png + If your PR is not yet ready for review, and you don't want to notify anyone, you + have the option to create a draft pull request: + .. image:: _static/draft_pr.png 2. 
**Making new changes** - Once the PR has been opened (as draft or not), you can still push new commits to it the same way we did before, by making changes to the branch associated with the PR. - + Once the PR has been opened (as draft or not), you can still push new commits to + it the same way we did before, by making changes to the branch associated with + the PR. 3. **Review the PR** - Once the PR has been opened or once the draft PR has been marked as ready, a review from code owners will be automatically requested: - - .. image:: _static/opened_pr.png - - Code owners will then look into the code, ask questions, request changes or validate the PR. + Once the PR has been opened or once the draft PR has been marked as ready, a + review from code owners will be automatically requested: - Merging will be blocked if there are ongoing requested changes. + .. image:: _static/opened_pr.png - .. image:: _static/changes_requested.png + Code owners will then look into the code, ask questions, request changes or + validate the PR. - To resolve them, just push the necessary changes to the branch associated with the PR: + Merging will be blocked if there are ongoing requested changes. - .. image:: _static/make_changes.png + .. image:: _static/changes_requested.png - And resolve the conversation: + To resolve them, just push the necessary changes to the branch associated with + the PR: - .. image:: _static/resolve_conv.png + .. image:: _static/make_changes.png - Once all the conversations have been resolved, you can re-request a review. + And resolve the conversation: + .. image:: _static/resolve_conv.png + Once all the conversations have been resolved, you can re-request a review. 4. **Once the PR is merged** - If all the automatic tests have passed and reviewers have no more changes to request, they can approve the PR and merge it. + If all the automatic tests have passed and reviewers have no more changes to + request, they can approve the PR and merge it. - .. 
image:: _static/merging_pr.png + .. image:: _static/merging_pr.png - Once it is merged, you can delete the branch on GitHub (a button should appear to do so) and also delete it locally by doing: + Once it is merged, you can delete the branch on GitHub (a button should appear to + do so) and also delete it locally by doing: - .. code-block:: shell + .. code-block:: shell - $ git switch main - $ git branch -D + $ git switch main + $ git branch -D - Then you should update your forked repository by doing: + Then you should update your forked repository by doing: - .. code-block:: shell - - $ git pull upstream main # to update the local repository - $ git push origin main # to push the changes to the remote repository + .. code-block:: shell + $ git pull upstream main # to update the local repository + $ git push origin main # to push the changes to the remote repository Example of first contribution ----------------------------- Problem -******* +~~~~~~~ -For our documentation, we've started to use the `Diàtaxis framework `_. +For our documentation, we've started to use the `Diàtaxis framework +`_. -Our "How to" guides should have titles that continue the sentence "How to …", for example, "How to upgrade to Flower 1.0". +Our "How to" guides should have titles that continue the sentence "How to …", for +example, "How to upgrade to Flower 1.0". -Most of our guides do not follow this new format yet, and changing their title is (unfortunately) more involved than one might think. +Most of our guides do not follow this new format yet, and changing their title is +(unfortunately) more involved than one might think. -This issue is about changing the title of a doc from present continuous to present simple. +This issue is about changing the title of a doc from present continuous to present +simple. -Let's take the example of "Saving Progress" which we changed to "Save Progress". Does this pass our check? 
+Let's take the example of "Saving Progress" which we changed to "Save Progress". Does +this pass our check? Before: "How to saving progress" ❌ After: "How to save progress" ✅ Solution -******** +~~~~~~~~ -This is a tiny change, but it'll allow us to test your end-to-end setup. After cloning and setting up the Flower repo, here's what you should do: +This is a tiny change, but it'll allow us to test your end-to-end setup. After cloning +and setting up the Flower repo, here's what you should do: - Find the source file in ``doc/source`` -- Make the change in the ``.rst`` file (beware, the dashes under the title should be the same length as the title itself) -- Build the docs and `check the result `_ +- Make the change in the ``.rst`` file (beware, the dashes under the title should be the + same length as the title itself) +- Build the docs and `check the result + `_ Rename file -::::::::::: ++++++++++++ -You might have noticed that the file name still reflects the old wording. -If we just change the file, then we break all existing links to it - it is **very important** to avoid that, breaking links can harm our search engine ranking. +You might have noticed that the file name still reflects the old wording. If we just +change the file, then we break all existing links to it - it is **very important** to +avoid that, breaking links can harm our search engine ranking. Here's how to change the file name: - Change the file name to ``save-progress.rst`` - Add a redirect rule to ``doc/source/conf.py`` -This will cause a redirect from ``saving-progress.html`` to ``save-progress.html``, old links will continue to work. +This will cause a redirect from ``saving-progress.html`` to ``save-progress.html``, old +links will continue to work. Apply changes in the index file -::::::::::::::::::::::::::::::: ++++++++++++++++++++++++++++++++ -For the lateral navigation bar to work properly, it is very important to update the ``index.rst`` file as well. 
-This is where we define the whole arborescence of the navbar. +For the lateral navigation bar to work properly, it is very important to update the +``index.rst`` file as well. This is where we define the whole arborescence of the +navbar. - Find and modify the file name in ``index.rst`` Open PR -::::::: ++++++++ -- Commit the changes (commit messages are always imperative: "Do something", in this case "Change …") +- Commit the changes (commit messages are always imperative: "Do something", in this + case "Change …") - Push the changes to your fork - Open a PR (as shown above) with title ``docs(framework) Update how-to guide title`` - Wait for it to be approved! - Congrats! 🥳 You're now officially a Flower contributor! - Next steps ---------- -Once you have made your first PR, and want to contribute more, be sure to check out the following : - -- :doc:`Good first contributions `, where you should particularly look into the :code:`baselines` contributions. +Once you have made your first PR, and want to contribute more, be sure to check out the +following : +- :doc:`Good first contributions `, where you + should particularly look into the ``baselines`` contributions. Appendix -------- @@ -324,7 +359,7 @@ Appendix .. _pr_title_format: PR title format -*************** +~~~~~~~~~~~~~~~ We enforce the following PR title format: @@ -334,9 +369,10 @@ We enforce the following PR title format: (or ``(:skip) `` to ignore the PR in the changelog) -Where ```` needs to be in ``{ci, fix, feat, docs, refactor, break}``, ```` -should be in ``{framework, baselines, datasets, examples, or '*' when modifying multiple projects which requires the ':skip' flag to be used}``, -and ```` starts with a capitalised verb in the imperative mood. 
+Where ```` needs to be in ``{ci, fix, feat, docs, refactor, break}``, +```` should be in ``{framework, baselines, datasets, examples, or '*' when +modifying multiple projects which requires the ':skip' flag to be used}``, and +```` starts with a capitalised verb in the imperative mood. Valid examples: diff --git a/doc/source/contributor-tutorial-get-started-as-a-contributor.rst b/doc/source/contributor-tutorial-get-started-as-a-contributor.rst index 3dac8647fa33..60b3ebdef743 100644 --- a/doc/source/contributor-tutorial-get-started-as-a-contributor.rst +++ b/doc/source/contributor-tutorial-get-started-as-a-contributor.rst @@ -9,163 +9,187 @@ Prerequisites - (Optional) `pyenv `_ - (Optional) `pyenv-virtualenv `_ -Flower uses :code:`pyproject.toml` to manage dependencies and configure -development tools (the ones which support it). Poetry is a build tool which -supports `PEP 517 `_. - +Flower uses ``pyproject.toml`` to manage dependencies and configure development tools +(the ones which support it). Poetry is a build tool which supports `PEP 517 +`_. Developer Machine Setup ----------------------- Preliminaries ~~~~~~~~~~~~~ + Some system-wide dependencies are needed. For macOS -^^^^^^^^^ ++++++++++ + +- Install `homebrew `_. Don't forget the post-installation actions to + add `brew` to your PATH. +- Install `xz` (to install different Python versions) and `pandoc` to build the docs: -* Install `homebrew `_. Don't forget the post-installation actions to add `brew` to your PATH. 
-* Install `xz` (to install different Python versions) and `pandoc` to build the - docs:: + :: - $ brew install xz pandoc + $ brew install xz pandoc For Ubuntu -^^^^^^^^^^ -Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all necessary -packages:: +++++++++++ - $ apt update - $ apt install build-essential zlib1g-dev libssl-dev libsqlite3-dev \ - libreadline-dev libbz2-dev libffi-dev liblzma-dev pandoc +Ensure your system (Ubuntu 22.04+) is up-to-date, and you have all necessary packages: + +:: + $ apt update + $ apt install build-essential zlib1g-dev libssl-dev libsqlite3-dev \ + libreadline-dev libbz2-dev libffi-dev liblzma-dev pandoc Create Flower Dev Environment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -1. Clone the `Flower repository `_ from -GitHub:: +1. Clone the `Flower repository `_ from GitHub: - $ git clone git@github.com:adap/flower.git - $ cd flower +:: + + $ git clone git@github.com:adap/flower.git + $ cd flower + +2. Let's create the Python environment for all-things Flower. If you wish to use + ``pyenv``, we provide two convenience scripts that you can use. If you prefer using + something other than ``pyenv``, create a new environment, activate and skip to the + last point where all packages are installed. -2. Let's create the Python environment for all-things Flower. If you wish to use :code:`pyenv`, we provide two convenience scripts that you can use. If you prefer using something else than :code:`pyenv`, create a new environment, activate and skip to the last point where all packages are installed. 
+- If you don't have ``pyenv`` installed, the following script will install it, set + it up, and create the virtual environment (with ``Python 3.9.20`` by default): -* If you don't have :code:`pyenv` installed, the following script that will install it, set it up, and create the virtual environment (with :code:`Python 3.9.20` by default):: + :: - $ ./dev/setup-defaults.sh # once completed, run the bootstrap script + $ ./dev/setup-defaults.sh # once completed, run the bootstrap script -* If you already have :code:`pyenv` installed (along with the :code:`pyenv-virtualenv` plugin), you can use the following convenience script (with :code:`Python 3.9.20` by default):: +- If you already have ``pyenv`` installed (along with the ``pyenv-virtualenv`` plugin), + you can use the following convenience script (with ``Python 3.9.20`` by default): - $ ./dev/venv-create.sh # once completed, run the `bootstrap.sh` script + :: -3. Install the Flower package in development mode (think -:code:`pip install -e`) along with all necessary dependencies:: + $ ./dev/venv-create.sh # once completed, run the `bootstrap.sh` script - (flower-) $ ./dev/bootstrap.sh +3. Install the Flower package in development mode (think ``pip install -e``) along with +all necessary dependencies: + +:: + (flower-) $ ./dev/bootstrap.sh Convenience Scripts ------------------- -The Flower repository contains a number of convenience scripts to make -recurring development tasks easier and less error-prone. See the :code:`/dev` -subdirectory for a full list. The following scripts are amongst the most -important ones: +The Flower repository contains a number of convenience scripts to make recurring +development tasks easier and less error-prone. See the ``/dev`` subdirectory for a full +list. 
The following scripts are amongst the most important ones: Create/Delete Virtual Environment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :: - $ ./dev/venv-create.sh # Default is 3.9.20 - $ ./dev/venv-delete.sh # Default is 3.9.20 + $ ./dev/venv-create.sh # Default is 3.9.20 + $ ./dev/venv-delete.sh # Default is 3.9.20 Compile ProtoBuf Definitions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :: - $ python -m flwr_tool.protoc + $ python -m flwr_tool.protoc Auto-Format Code ~~~~~~~~~~~~~~~~ :: - $ ./dev/format.sh + $ ./dev/format.sh Run Linters and Tests ~~~~~~~~~~~~~~~~~~~~~ :: - $ ./dev/test.sh + $ ./dev/test.sh Add a pre-commit hook ~~~~~~~~~~~~~~~~~~~~~ -Developers may integrate a pre-commit hook into their workflow utilizing the `pre-commit `_ library. The pre-commit hook is configured to execute two primary operations: ``./dev/format.sh`` and ``./dev/test.sh`` scripts. +Developers may integrate a pre-commit hook into their workflow utilizing the `pre-commit +`_ library. The pre-commit hook is configured to +execute two primary operations: ``./dev/format.sh`` and ``./dev/test.sh`` scripts. There are multiple ways developers can use this: 1. Install the pre-commit hook to your local git directory by simply running: :: - - $ pre-commit install - - Each ``git commit`` will trigger the execution of formatting and linting/test scripts. - - If in a hurry, bypass the hook using ``--no-verify`` with the ``git commit`` command. + $ pre-commit install + + - Each ``git commit`` will trigger the execution of formatting and linting/test + scripts. + - If in a hurry, bypass the hook using ``--no-verify`` with the ``git commit`` + command. + :: - - $ git commit --no-verify -m "Add new feature" - -2. For developers who prefer not to install the hook permanently, it is possible to execute a one-time check prior to committing changes by using the following command: - + + $ git commit --no-verify -m "Add new feature" + +2. 
For developers who prefer not to install the hook permanently, it is possible to + execute a one-time check prior to committing changes by using the following command: + :: - $ pre-commit run --all-files - - This executes the formatting and linting checks/tests on all the files without modifying the default behavior of ``git commit``. + $ pre-commit run --all-files + + This executes the formatting and linting checks/tests on all the files without + modifying the default behavior of ``git commit``. Run Github Actions (CI) locally ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Developers could run the full set of Github Actions workflows under their local -environment by using `Act `_. Please refer to -the installation instructions under the linked repository and run the next -command under Flower main cloned repository folder:: +environment by using `Act `_. Please refer to the +installation instructions under the linked repository and run the next command under +Flower main cloned repository folder: - $ act +:: -The Flower default workflow would run by setting up the required Docker -machines underneath. + $ act +The Flower default workflow would run by setting up the required Docker machines +underneath. Build Release ------------- -Flower uses Poetry to build releases. The necessary command is wrapped in a -simple script:: +Flower uses Poetry to build releases. The necessary command is wrapped in a simple +script: - $ ./dev/build.sh +:: -The resulting :code:`.whl` and :code:`.tar.gz` releases will be stored in the -:code:`/dist` subdirectory. + $ ./dev/build.sh +The resulting ``.whl`` and ``.tar.gz`` releases will be stored in the ``/dist`` +subdirectory. Build Documentation ------------------- Flower's documentation uses `Sphinx `_. 
There's no -convenience script to re-build the documentation yet, but it's pretty easy:: +convenience script to re-build the documentation yet, but it's pretty easy: + +:: - $ cd doc - $ make html + $ cd doc + $ make html This will generate HTML documentation in ``doc/build/html``. -Note that, in order to build the documentation locally -(with ``poetry run make html``, like described below), -`Pandoc `_ needs to be installed on the system. +Note that, in order to build the documentation locally (with ``poetry run make html``, +like described below), `Pandoc `_ needs to be +installed on the system. diff --git a/doc/source/docker/enable-tls.rst b/doc/source/docker/enable-tls.rst index ac604b708f88..f50edb8c651d 100644 --- a/doc/source/docker/enable-tls.rst +++ b/doc/source/docker/enable-tls.rst @@ -1,152 +1,152 @@ Enable TLS for Secure Connections ================================= -When operating in a production environment, it is strongly recommended to enable Transport Layer -Security (TLS) for each Flower Component to ensure secure communication. +When operating in a production environment, it is strongly recommended to enable +Transport Layer Security (TLS) for each Flower Component to ensure secure communication. -To enable TLS, you will need a PEM-encoded root certificate, a PEM-encoded private key and a -PEM-encoded certificate chain. +To enable TLS, you will need a PEM-encoded root certificate, a PEM-encoded private key +and a PEM-encoded certificate chain. .. note:: - For testing purposes, you can generate your own self-signed certificates. The - `Enable SSL connections `__ - page contains a section that will guide you through the process. + For testing purposes, you can generate your own self-signed certificates. The + `Enable SSL connections + `__ + page contains a section that will guide you through the process. 
+Because Flower containers, by default, run with a non-root user ``app``, the mounted +files and directories must have the proper permissions for the user ID ``49999``. -Because Flower containers, by default, run with a non-root user ``app``, the mounted files and -directories must have the proper permissions for the user ID ``49999``. +For example, to change the user ID of all files in the ``certificates/`` directory, you +can run ``sudo chown -R 49999:49999 certificates/*``. -For example, to change the user ID of all files in the ``certificates/`` directory, you can run -``sudo chown -R 49999:49999 certificates/*``. - -If you later want to delete the directory, you can change the user ID back to the current user -ID by running ``sudo chown -R $USER:$(id -gn) state``. +If you later want to delete the directory, you can change the user ID back to the +current user ID by running ``sudo chown -R $USER:$(id -gn) state``. SuperLink --------- -Assuming all files we need are in the local ``certificates`` directory, we can use the flag -``--volume`` to mount the local directory into the ``/app/certificates/`` directory of the container: +Assuming all files we need are in the local ``certificates`` directory, we can use the +flag ``--volume`` to mount the local directory into the ``/app/certificates/`` directory +of the container: .. code-block:: bash - :substitutions: + :substitutions: - $ docker run --rm \ - --volume ./certificates/:/app/certificates/:ro \ - flwr/superlink:|stable_flwr_version| \ - --ssl-ca-certfile certificates/ca.crt \ - --ssl-certfile certificates/server.pem \ - --ssl-keyfile certificates/server.key + $ docker run --rm \ + --volume ./certificates/:/app/certificates/:ro \ + flwr/superlink:|stable_flwr_version| \ + --ssl-ca-certfile certificates/ca.crt \ + --ssl-certfile certificates/server.pem \ + --ssl-keyfile certificates/server.key .. dropdown:: Understanding the command - * ``docker run``: This tells Docker to run a container from an image. 
- * ``--rm``: Remove the container once it is stopped or the command exits. - * | ``--volume ./certificates/:/app/certificates/:ro``: Mount the ``certificates`` directory in - | the current working directory of the host machine as a read-only volume at the - | ``/app/certificates`` directory inside the container. - | - | This allows the container to access the TLS certificates that are stored in the certificates - | directory. - * | :substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of the image to be run and the specific - | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a specific version of the image. - * | ``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA certificate file - | inside the container. - | - | The ``certificates/ca.crt`` file is a certificate that is used to verify the identity of the - | SuperLink. - * | ``--ssl-certfile certificates/server.pem``: Specify the location of the SuperLink's - | TLS certificate file inside the container. - | - | The ``certificates/server.pem`` file is used to identify the SuperLink and to encrypt the - | data that is transmitted over the network. - * | ``--ssl-keyfile certificates/server.key``: Specify the location of the SuperLink's - | TLS private key file inside the container. - | - | The ``certificates/server.key`` file is used to decrypt the data that is transmitted over - | the network. + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * | ``--volume ./certificates/:/app/certificates/:ro``: Mount the ``certificates`` directory in + | the current working directory of the host machine as a read-only volume at the + | ``/app/certificates`` directory inside the container. + | + | This allows the container to access the TLS certificates that are stored in the certificates + | directory. 
+ * | :substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of the image to be run and the specific + | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a specific version of the image. + * | ``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA certificate file + | inside the container. + | + | The ``certificates/ca.crt`` file is a certificate that is used to verify the identity of the + | SuperLink. + * | ``--ssl-certfile certificates/server.pem``: Specify the location of the SuperLink's + | TLS certificate file inside the container. + | + | The ``certificates/server.pem`` file is used to identify the SuperLink and to encrypt the + | data that is transmitted over the network. + * | ``--ssl-keyfile certificates/server.key``: Specify the location of the SuperLink's + | TLS private key file inside the container. + | + | The ``certificates/server.key`` file is used to decrypt the data that is transmitted over + | the network. SuperNode --------- -Assuming that the ``ca.crt`` certificate already exists locally, we can use the flag ``--volume`` to mount the local -certificate into the container's ``/app/`` directory. +Assuming that the ``ca.crt`` certificate already exists locally, we can use the flag +``--volume`` to mount the local certificate into the container's ``/app/`` directory. .. note:: - If you're generating self-signed certificates and the ``ca.crt`` certificate doesn't exist - on the SuperNode, you can copy it over after the generation step. + If you're generating self-signed certificates and the ``ca.crt`` certificate doesn't + exist on the SuperNode, you can copy it over after the generation step. .. 
code-block:: bash - :substitutions: + :substitutions: - $ docker run --rm \ - --volume ./ca.crt:/app/ca.crt/:ro \ - flwr/supernode:|stable_flwr_version| \ - --root-certificates ca.crt + $ docker run --rm \ + --volume ./ca.crt:/app/ca.crt/:ro \ + flwr/supernode:|stable_flwr_version| \ + --root-certificates ca.crt .. dropdown:: Understanding the command - * ``docker run``: This tells Docker to run a container from an image. - * ``--rm``: Remove the container once it is stopped or the command exits. - * | ``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the - | current working directory of the host machine as a read-only volume at the ``/app/ca.crt`` - | directory inside the container. - * | :substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of the image to be run and the specific - | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a specific version of the image. - * | ``--root-certificates ca.crt``: This specifies the location of the CA certificate file - | inside the container. - | - | The ``ca.crt`` file is used to verify the identity of the SuperLink. - + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * | ``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the + | current working directory of the host machine as a read-only volume at the ``/app/ca.crt`` + | directory inside the container. + * | :substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of the image to be run and the specific + | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a specific version of the image. + * | ``--root-certificates ca.crt``: This specifies the location of the CA certificate file + | inside the container. + | + | The ``ca.crt`` file is used to verify the identity of the SuperLink. 
SuperExec --------- -Assuming all files we need are in the local ``certificates`` directory where the SuperExec will be executed from, we can use the flag -``--volume`` to mount the local directory into the ``/app/certificates/`` directory of the container: +Assuming all files we need are in the local ``certificates`` directory where the +SuperExec will be executed from, we can use the flag ``--volume`` to mount the local +directory into the ``/app/certificates/`` directory of the container: .. code-block:: bash - :substitutions: - - $ docker run --rm \ - --volume ./certificates/:/app/certificates/:ro \ - flwr/superexec:|stable_flwr_version| \ - --ssl-ca-certfile certificates/ca.crt \ - --ssl-certfile certificates/server.pem \ - --ssl-keyfile certificates/server.key \ - --executor-config \ - root-certificates=\"certificates/superlink_ca.crt\" + :substitutions: + $ docker run --rm \ + --volume ./certificates/:/app/certificates/:ro \ + flwr/superexec:|stable_flwr_version| \ + --ssl-ca-certfile certificates/ca.crt \ + --ssl-certfile certificates/server.pem \ + --ssl-keyfile certificates/server.key \ + --executor-config \ + root-certificates=\"certificates/superlink_ca.crt\" .. dropdown:: Understanding the command - * ``docker run``: This tells Docker to run a container from an image. - * ``--rm``: Remove the container once it is stopped or the command exits. - * | ``--volume ./certificates/:/app/certificates/:ro``: Mount the ``certificates`` directory in - | the current working directory of the host machine as a read-only volume at the - | ``/app/certificates`` directory inside the container. - | - | This allows the container to access the TLS certificates that are stored in the certificates - | directory. - * | :substitution-code:`flwr/superexec:|stable_flwr_version|`: The name of the image to be run and the specific - | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a specific version of the image. 
- * | ``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA certificate file - | inside the container. - | - | The ``certificates/ca.crt`` file is a certificate that is used to verify the identity of the - | SuperExec. - * | ``--ssl-certfile certificates/server.pem``: Specify the location of the SuperExec's - | TLS certificate file inside the container. - | - | The ``certificates/server.pem`` file is used to identify the SuperExec and to encrypt the - | data that is transmitted over the network. - * | ``--ssl-keyfile certificates/server.key``: Specify the location of the SuperExec's - | TLS private key file inside the container. - | - | The ``certificates/server.key`` file is used to decrypt the data that is transmitted over - | the network. - * | ``--executor-config root-certificates=\"certificates/superlink_ca.crt\"``: Specify the - | location of the CA certificate file inside the container that the SuperExec executor - | should use to verify the SuperLink's identity. + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * | ``--volume ./certificates/:/app/certificates/:ro``: Mount the ``certificates`` directory in + | the current working directory of the host machine as a read-only volume at the + | ``/app/certificates`` directory inside the container. + | + | This allows the container to access the TLS certificates that are stored in the certificates + | directory. + * | :substitution-code:`flwr/superexec:|stable_flwr_version|`: The name of the image to be run and the specific + | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a specific version of the image. + * | ``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA certificate file + | inside the container. + | + | The ``certificates/ca.crt`` file is a certificate that is used to verify the identity of the + | SuperExec. 
+ * | ``--ssl-certfile certificates/server.pem``: Specify the location of the SuperExec's + | TLS certificate file inside the container. + | + | The ``certificates/server.pem`` file is used to identify the SuperExec and to encrypt the + | data that is transmitted over the network. + * | ``--ssl-keyfile certificates/server.key``: Specify the location of the SuperExec's + | TLS private key file inside the container. + | + | The ``certificates/server.key`` file is used to decrypt the data that is transmitted over + | the network. + * | ``--executor-config root-certificates=\"certificates/superlink_ca.crt\"``: Specify the + | location of the CA certificate file inside the container that the SuperExec executor + | should use to verify the SuperLink's identity. diff --git a/doc/source/docker/index.rst b/doc/source/docker/index.rst index 968f01581b34..3fd391114dc1 100644 --- a/doc/source/docker/index.rst +++ b/doc/source/docker/index.rst @@ -1,47 +1,48 @@ Run Flower using Docker ======================= -Start your Flower journey with our pre-made Docker images on Docker Hub, supporting ``amd64`` -and ``arm64v8`` architectures. +Start your Flower journey with our pre-made Docker images on Docker Hub, supporting +``amd64`` and ``arm64v8`` architectures. -Our Quickstart guide walks you through containerizing a Flower project and running it end to -end using Docker. +Our Quickstart guide walks you through containerizing a Flower project and running it +end to end using Docker. Getting Started --------------- .. toctree:: - :maxdepth: 1 - - tutorial-quickstart-docker + :maxdepth: 1 + tutorial-quickstart-docker Running in Production --------------------- .. toctree:: - :maxdepth: 1 + :maxdepth: 1 - enable-tls - persist-superlink-state + enable-tls + persist-superlink-state Advanced Options ---------------- .. 
toctree:: - :maxdepth: 1 + :maxdepth: 1 - set-environment-variables - run-as-root-user - run-as-subprocess - pin-version - use-a-different-version + set-environment-variables + run-as-root-user + run-as-subprocess + pin-version + use-a-different-version Run Flower using Docker Compose ------------------------------- .. toctree:: - :maxdepth: 1 - tutorial-quickstart-docker-compose - run-quickstart-examples-docker-compose + :maxdepth: 1 + + tutorial-quickstart-docker-compose + run-quickstart-examples-docker-compose + tutorial-deploy-on-multiple-machines diff --git a/doc/source/docker/persist-superlink-state.rst b/doc/source/docker/persist-superlink-state.rst index 68e04ed33762..214e408c44c3 100644 --- a/doc/source/docker/persist-superlink-state.rst +++ b/doc/source/docker/persist-superlink-state.rst @@ -1,39 +1,40 @@ Persist the State of the SuperLink ================================== -By default, the Flower SuperLink keeps its state in-memory. When using the Docker flag ``--rm``, the -state is not persisted between container starts. +By default, the Flower SuperLink keeps its state in-memory. When using the Docker flag +``--rm``, the state is not persisted between container starts. -If you want to persist the state of the SuperLink on your host system, all you need to do is specify -a directory where you want to save the file on your host system and a name for the database file. +If you want to persist the state of the SuperLink on your host system, all you need to +do is specify a directory where you want to save the file on your host system and a name +for the database file. -By default, the SuperLink container runs with a non-root user called ``app`` with the user ID -``49999``. It is recommended to create a new directory and change the user ID of the directory to -``49999`` to ensure the mounted directory has the proper permissions. +By default, the SuperLink container runs with a non-root user called ``app`` with the +user ID ``49999``. 
It is recommended to create a new directory and change the user ID of +the directory to ``49999`` to ensure the mounted directory has the proper permissions. -If you later want to delete the directory, you can change the user ID back to the current user -ID by running ``sudo chown -R $USER:$(id -gn) state``. +If you later want to delete the directory, you can change the user ID back to the +current user ID by running ``sudo chown -R $USER:$(id -gn) state``. Example ------- -In the example below, we create a new directory called ``state``, change the user ID and tell -Docker via the flag ``--volume`` to mount the local ``state`` directory into the ``/app/state`` -directory of the container. Lastly, we use the flag ``--database`` to specify the name of the -database file. +In the example below, we create a new directory called ``state``, change the user ID and +tell Docker via the flag ``--volume`` to mount the local ``state`` directory into the +``/app/state`` directory of the container. Lastly, we use the flag ``--database`` to +specify the name of the database file. .. code-block:: bash - :substitutions: - - $ mkdir state - $ sudo chown -R 49999:49999 state - $ docker run --rm \ - --volume ./state/:/app/state flwr/superlink:|stable_flwr_version| \ - --database state.db \ - ... - -As soon as the SuperLink starts, the file ``state.db`` is created in the ``state`` directory on -your host system. If the file already exists, the SuperLink tries to restore the state from the -file. To start the SuperLink with an empty database, ensure that there is no database -called ``state.db`` in the ``state`` directory (``rm state.db``) before you execute the -``docker run`` command above. + :substitutions: + + $ mkdir state + $ sudo chown -R 49999:49999 state + $ docker run --rm \ + --volume ./state/:/app/state flwr/superlink:|stable_flwr_version| \ + --database state.db \ + ... 
+ +As soon as the SuperLink starts, the file ``state.db`` is created in the ``state`` +directory on your host system. If the file already exists, the SuperLink tries to +restore the state from the file. To start the SuperLink with an empty database, ensure +that there is no database called ``state.db`` in the ``state`` directory (``rm +state.db``) before you execute the ``docker run`` command above. diff --git a/doc/source/docker/pin-version.rst b/doc/source/docker/pin-version.rst index 800e3ed95423..4a69860aa428 100644 --- a/doc/source/docker/pin-version.rst +++ b/doc/source/docker/pin-version.rst @@ -1,10 +1,11 @@ Pin a Docker Image to a Specific Version ======================================== -It may happen that we update the images behind the tags. Such updates usually include security -updates of system dependencies that should not change the functionality of Flower. However, if -you want to ensure that you use a fixed version of the Docker image in your deployments, you can -`specify the digest `_ +It may happen that we update the images behind the tags. Such updates usually include +security updates of system dependencies that should not change the functionality of +Flower. However, if you want to ensure that you use a fixed version of the Docker image +in your deployments, you can `specify the digest +`_ of the image instead of the tag. Example @@ -14,23 +15,23 @@ The following command returns the current image digest referenced by the :substitution-code:`superlink:|stable_flwr_version|` tag: .. code-block:: bash - :substitutions: + :substitutions: - $ docker pull flwr/superlink:|stable_flwr_version| - $ docker inspect --format='{{index .RepoDigests 0}}' flwr/superlink:|stable_flwr_version| + $ docker pull flwr/superlink:|stable_flwr_version| + $ docker inspect --format='{{index .RepoDigests 0}}' flwr/superlink:|stable_flwr_version| This will output .. 
code-block:: bash - :substitutions: + :substitutions: - flwr/superlink@sha256:|stable__flwr_superlink_docker_digest| + flwr/superlink@sha256:|stable__flwr_superlink_docker_digest| Next, we can pin the digest when running a new SuperLink container: .. code-block:: bash - :substitutions: + :substitutions: - $ docker run \ - --rm flwr/superlink@sha256:|latest_version_docker_sha| \ - [OPTIONS] + $ docker run \ + --rm flwr/superlink@sha256:|latest_version_docker_sha| \ + [OPTIONS] diff --git a/doc/source/docker/run-as-root-user.rst b/doc/source/docker/run-as-root-user.rst index d1b41a9b6168..5f8e5eae43af 100644 --- a/doc/source/docker/run-as-root-user.rst +++ b/doc/source/docker/run-as-root-user.rst @@ -2,11 +2,11 @@ Run with Root User Privileges ============================= Flower Docker images, by default, run with a non-root user (username/groupname: ``app``, -UID/GID: ``49999``). Using root user is **not recommended** unless it is necessary for specific -tasks during the build process. +UID/GID: ``49999``). Using root user is **not recommended** unless it is necessary for +specific tasks during the build process. -Always make sure to run the container as a non-root user in production to maintain security -best practices. +Always make sure to run the container as a non-root user in production to maintain +security best practices. Run a Container with Root User Privileges ----------------------------------------- @@ -14,32 +14,33 @@ Run a Container with Root User Privileges Run the Docker image with the ``-u`` flag and specify ``root`` as the username: .. code-block:: bash - :substitutions: + :substitutions: - $ docker run --rm -u root flwr/superlink:|stable_flwr_version| + $ docker run --rm -u root flwr/superlink:|stable_flwr_version| This command will run the Docker container with root user privileges. 
Run the Build Process with Root User Privileges ----------------------------------------------- -If you want to switch to the root user during the build process of the Docker image to install -missing system dependencies, you can use the ``USER root`` directive within your Dockerfile. +If you want to switch to the root user during the build process of the Docker image to +install missing system dependencies, you can use the ``USER root`` directive within your +Dockerfile. .. code-block:: dockerfile - :caption: SuperNode Dockerfile - :substitutions: + :caption: SuperNode Dockerfile + :substitutions: - FROM flwr/supernode:|stable_flwr_version| + FROM flwr/supernode:|stable_flwr_version| - # Switch to root user - USER root + # Switch to root user + USER root - # Install missing dependencies (requires root access) - RUN apt-get update && apt-get install -y + # Install missing dependencies (requires root access) + RUN apt-get update && apt-get install -y - # Switch back to non-root user app - USER app + # Switch back to non-root user app + USER app - # Continue with your Docker image build process - # ... + # Continue with your Docker image build process + # ... diff --git a/doc/source/docker/run-as-subprocess.rst b/doc/source/docker/run-as-subprocess.rst index f8c482f632a0..d97319ff52af 100644 --- a/doc/source/docker/run-as-subprocess.rst +++ b/doc/source/docker/run-as-subprocess.rst @@ -1,53 +1,53 @@ Run ClientApp as a Subprocess ============================= -In this mode, the ClientApp is executed as a subprocess within the SuperNode Docker container, -rather than running in a separate container. This approach reduces the number of running containers, -which can be beneficial for environments with limited resources. However, it also means that the -ClientApp is no longer isolated from the SuperNode, which may introduce additional security -concerns. 
+In this mode, the ClientApp is executed as a subprocess within the SuperNode Docker +container, rather than running in a separate container. This approach reduces the number +of running containers, which can be beneficial for environments with limited resources. +However, it also means that the ClientApp is no longer isolated from the SuperNode, +which may introduce additional security concerns. Prerequisites ------------- -#. Before running the ClientApp as a subprocess, ensure that the FAB dependencies have been installed - in the SuperNode images. This can be done by extending the SuperNode image: +1. Before running the ClientApp as a subprocess, ensure that the FAB dependencies have + been installed in the SuperNode images. This can be done by extending the SuperNode + image: .. code-block:: dockerfile - :caption: Dockerfile.supernode - :linenos: - :substitutions: + :caption: Dockerfile.supernode + :linenos: + :substitutions: - FROM flwr/supernode:|stable_flwr_version| + FROM flwr/supernode:|stable_flwr_version| - WORKDIR /app - COPY pyproject.toml . - RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ - && python -m pip install -U --no-cache-dir . + WORKDIR /app + COPY pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . - ENTRYPOINT ["flower-supernode"] + ENTRYPOINT ["flower-supernode"] -#. Next, build the SuperNode Docker image by running the following command in the directory where - Dockerfile is located: +2. Next, build the SuperNode Docker image by running the following command in the + directory where Dockerfile is located: .. code-block:: shell - $ docker build -f Dockerfile.supernode -t flwr_supernode:0.0.1 . - + $ docker build -f Dockerfile.supernode -t flwr_supernode:0.0.1 . 
Run the ClientApp as a Subprocess --------------------------------- -Start the SuperNode with the flag ``--isolation subprocess``, which tells the SuperNode to execute -the ClientApp as a subprocess: +Start the SuperNode with the flag ``--isolation subprocess``, which tells the SuperNode +to execute the ClientApp as a subprocess: .. code-block:: shell - $ docker run --rm \ - --detach \ - flwr_supernode:0.0.1 \ - --insecure \ - --superlink superlink:9092 \ - --node-config "partition-id=1 num-partitions=2" \ - --supernode-address localhost:9094 \ - --isolation subprocess + $ docker run --rm \ + --detach \ + flwr_supernode:0.0.1 \ + --insecure \ + --superlink superlink:9092 \ + --node-config "partition-id=1 num-partitions=2" \ + --supernode-address localhost:9094 \ + --isolation subprocess diff --git a/doc/source/docker/run-quickstart-examples-docker-compose.rst b/doc/source/docker/run-quickstart-examples-docker-compose.rst index 5bdb33e991dd..b31f0035e143 100644 --- a/doc/source/docker/run-quickstart-examples-docker-compose.rst +++ b/doc/source/docker/run-quickstart-examples-docker-compose.rst @@ -1,15 +1,16 @@ Run Flower Quickstart Examples with Docker Compose ================================================== -Flower provides a set of `quickstart examples `_ -to help you get started with the framework. These examples are designed to demonstrate the -capabilities of Flower and by default run using the Simulation Engine. This guide demonstrates -how to run them using Flower's Deployment Engine via Docker Compose. +Flower provides a set of `quickstart examples +`_ to help you get started with the +framework. These examples are designed to demonstrate the capabilities of Flower and by +default run using the Simulation Engine. This guide demonstrates how to run them using +Flower's Deployment Engine via Docker Compose. .. important:: - Some quickstart examples may have limitations or requirements that prevent them from running - on every environment. 
For more information, please see `Limitations`_. + Some quickstart examples may have limitations or requirements that prevent them from + running on every environment. For more information, please see Limitations_. Prerequisites ------------- @@ -23,68 +24,71 @@ Before you start, make sure that: Run the Quickstart Example -------------------------- -#. Clone the quickstart example you like to run. For example, ``quickstart-pytorch``: +1. Clone the quickstart example you like to run. For example, ``quickstart-pytorch``: .. code-block:: bash - $ git clone --depth=1 https://github.com/adap/flower.git \ - && mv flower/examples/quickstart-pytorch . \ - && rm -rf flower && cd quickstart-pytorch + $ git clone --depth=1 https://github.com/adap/flower.git \ + && mv flower/examples/quickstart-pytorch . \ + && rm -rf flower && cd quickstart-pytorch -#. Download the `compose.yml `_ file into the example directory: +2. Download the `compose.yml + `_ file + into the example directory: .. code-block:: bash - $ curl https://raw.githubusercontent.com/adap/flower/refs/heads/main/src/docker/complete/compose.yml \ - -o compose.yml + $ curl https://raw.githubusercontent.com/adap/flower/refs/heads/main/src/docker/complete/compose.yml \ + -o compose.yml -#. Build and start the services using the following command: +3. Build and start the services using the following command: .. code-block:: bash - $ docker compose up --build -d + $ docker compose up --build -d -#. Append the following lines to the end of the ``pyproject.toml`` file and save it: +4. Append the following lines to the end of the ``pyproject.toml`` file and save it: .. code-block:: toml - :caption: pyproject.toml + :caption: pyproject.toml - [tool.flwr.federations.local-deployment] - address = "127.0.0.1:9093" - insecure = true + [tool.flwr.federations.local-deployment] + address = "127.0.0.1:9093" + insecure = true .. note:: - You can customize the string that follows ``tool.flwr.federations.`` to fit your needs. 
- However, please note that the string cannot contain a dot (``.``). + You can customize the string that follows ``tool.flwr.federations.`` to fit your + needs. However, please note that the string cannot contain a dot (``.``). - In this example, ``local-deployment`` has been used. Just remember to replace - ``local-deployment`` with your chosen name in both the ``tool.flwr.federations.`` string - and the corresponding ``flwr run .`` command. + In this example, ``local-deployment`` has been used. Just remember to replace + ``local-deployment`` with your chosen name in both the ``tool.flwr.federations.`` + string and the corresponding ``flwr run .`` command. -#. Run the example: +5. Run the example: .. code-block:: bash - $ flwr run . local-deployment + $ flwr run . local-deployment -#. Follow the logs of the SuperExec service: +6. Follow the logs of the SuperExec service: .. code-block:: bash - $ docker compose logs superexec -f + $ docker compose logs superexec -f -That is all it takes! You can monitor the progress of the run through the logs of the SuperExec. +That is all it takes! You can monitor the progress of the run through the logs of the +SuperExec. Run a Different Quickstart Example ---------------------------------- -To run a different quickstart example, such as ``quickstart-tensorflow``, first, shut down the Docker -Compose services of the current example: +To run a different quickstart example, such as ``quickstart-tensorflow``, first, shut +down the Docker Compose services of the current example: .. code-block:: bash - $ docker compose down + $ docker compose down After that, you can repeat the steps above. @@ -92,31 +96,32 @@ Limitations ----------- .. list-table:: - :header-rows: 1 - - * - Quickstart Example - - Limitations - * - quickstart-fastai - - None - * - quickstart-huggingface - - None - * - quickstart-jax - - The example has not yet been updated to work with the latest ``flwr`` version. 
- * - quickstart-mlcube - - The example has not yet been updated to work with the latest ``flwr`` version. - * - quickstart-mlx - - `Requires to run on macOS with Apple Silicon `_. - * - quickstart-monai - - None - * - quickstart-pandas - - None - * - quickstart-pytorch-lightning - - Requires an older pip version that is not supported by the Flower Docker images. - * - quickstart-pytorch - - None - * - quickstart-sklearn-tabular - - None - * - quickstart-tabnet - - The example has not yet been updated to work with the latest ``flwr`` version. - * - quickstart-tensorflow - - Only runs on AMD64. + :header-rows: 1 + + - - Quickstart Example + - Limitations + - - quickstart-fastai + - None + - - quickstart-huggingface + - None + - - quickstart-jax + - The example has not yet been updated to work with the latest ``flwr`` version. + - - quickstart-mlcube + - The example has not yet been updated to work with the latest ``flwr`` version. + - - quickstart-mlx + - `Requires to run on macOS with Apple Silicon + `_. + - - quickstart-monai + - None + - - quickstart-pandas + - None + - - quickstart-pytorch-lightning + - Requires an older pip version that is not supported by the Flower Docker images. + - - quickstart-pytorch + - None + - - quickstart-sklearn-tabular + - None + - - quickstart-tabnet + - The example has not yet been updated to work with the latest ``flwr`` version. + - - quickstart-tensorflow + - Only runs on AMD64. diff --git a/doc/source/docker/set-environment-variables.rst b/doc/source/docker/set-environment-variables.rst index ff8d6dde0a29..f5d860812bab 100644 --- a/doc/source/docker/set-environment-variables.rst +++ b/doc/source/docker/set-environment-variables.rst @@ -8,7 +8,7 @@ Example ------- .. 
code-block:: bash - :substitutions: + :substitutions: - $ docker run -e FLWR_TELEMETRY_ENABLED=0 -e FLWR_TELEMETRY_LOGGING=0 \ - --rm flwr/superlink:|stable_flwr_version| + $ docker run -e FLWR_TELEMETRY_ENABLED=0 -e FLWR_TELEMETRY_LOGGING=0 \ + --rm flwr/superlink:|stable_flwr_version| diff --git a/doc/source/docker/tutorial-deploy-on-multiple-machines.rst b/doc/source/docker/tutorial-deploy-on-multiple-machines.rst new file mode 100644 index 000000000000..72958c926ba9 --- /dev/null +++ b/doc/source/docker/tutorial-deploy-on-multiple-machines.rst @@ -0,0 +1,171 @@ +Deploy Flower on Multiple Machines with Docker Compose +====================================================== + +This guide will help you set up a Flower project on multiple machines using Docker +Compose. + +You will learn how to run the Flower client and server components on two separate +machines, with Flower configured to use TLS encryption and persist SuperLink state +across restarts. A server consists of a SuperLink and ``SuperExec``. For more details +about the Flower architecture, refer to the :doc:`../explanation-flower-architecture` +explainer page. + +This guide assumes you have completed the :doc:`tutorial-quickstart-docker-compose` +tutorial. It is highly recommended that you follow and understand the contents of that +tutorial before proceeding with this guide. + +Prerequisites +------------- + +Before you begin, make sure you have the following prerequisites: + +- The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally. +- The Docker daemon is running on your local machine and the remote machine. +- Docker Compose V2 is installed on both your local machine and the remote machine. +- You can connect to the remote machine from your local machine. +- Ports ``9091`` and ``9093`` are accessible on the remote machine. + +.. note:: + + The guide uses the |quickstart_sklearn_tabular|_ example as an example project. 
+ + If your project has a different name or location, please remember to adjust the + commands/paths accordingly. + +Step 1: Set Up +-------------- + +1. Clone the Flower repository and change to the ``distributed`` directory: + + .. code-block:: bash + + $ git clone --depth=1 https://github.com/adap/flower.git + $ cd flower/src/docker/distributed + +2. Get the IP address from the remote machine and save it for later. +3. Use the ``certs.yml`` Compose file to generate your own self-signed certificates. If + you have certificates, you can continue with Step 2. + + .. important:: + + These certificates should be used only for development purposes. + + For production environments, you may have to use dedicated services to obtain + your certificates. + + First, set the environment variables ``SUPERLINK_IP`` and ``SUPEREXEC_IP`` with the + IP address from the remote machine. For example, if the IP is ``192.168.2.33``, + execute: + + .. code-block:: bash + + $ export SUPERLINK_IP=192.168.2.33 + $ export SUPEREXEC_IP=192.168.2.33 + + Next, generate the self-signed certificates: + + .. code-block:: bash + + $ docker compose -f certs.yml -f ../complete/certs.yml up --build + +Step 2: Copy the Server Compose Files +------------------------------------- + +Use the method that works best for you to copy the ``server`` directory, the +certificates, and your Flower project to the remote machine. + +For example, you can use ``scp`` to copy the directories: + +.. code-block:: bash + + $ scp -r ./server \ + ./superexec-certificates \ + ./superlink-certificates \ + ../../../examples/quickstart-sklearn-tabular remote:~/distributed + +Step 3: Start the Flower Server Components +------------------------------------------ + +Log into the remote machine using ``ssh`` and run the following command to start the +SuperLink and SuperExec services: + +.. 
code-block:: bash + + $ ssh <your-remote-machine> + # In your remote machine + $ cd <path-to-``distributed``-directory> + $ export PROJECT_DIR=../quickstart-sklearn-tabular + $ docker compose -f server/compose.yml up --build -d + +.. note:: + + The path of the ``PROJECT_DIR`` should be relative to the location of the ``server`` + Docker Compose files. + +Go back to your terminal on your local machine. + +Step 4: Start the Flower Client Components +------------------------------------------ + +On your local machine, run the following command to start the client components: + +.. code-block:: bash + + # In the `docker/distributed` directory + $ export PROJECT_DIR=../../../../examples/quickstart-sklearn-tabular + $ docker compose -f client/compose.yml up --build -d + +.. note:: + + The path of the ``PROJECT_DIR`` should be relative to the location of the ``client`` + Docker Compose files. + +Step 5: Run Your Flower Project +------------------------------- + +Specify the remote SuperExec IP address and the path to the root certificate in the +``[tool.flwr.federations.remote-superexec]`` table in the ``pyproject.toml`` file. Here, +we have named our remote federation ``remote-superexec``: + +.. code-block:: toml + :caption: examples/quickstart-sklearn-tabular/pyproject.toml + + [tool.flwr.federations.remote-superexec] + address = "192.168.2.33:9093" + root-certificates = "../../src/docker/distributed/superexec-certificates/ca.crt" + +.. note:: + + The path of the ``root-certificates`` should be relative to the location of the + ``pyproject.toml`` file. + +To run the project, execute: + +.. code-block:: bash + + $ flwr run ../../../examples/quickstart-sklearn-tabular remote-superexec + +That's it! With these steps, you've set up Flower on two separate machines and are ready +to start using it. + +Step 6: Clean Up +---------------- + +Shut down the Flower client components: + +.. 
code-block:: bash + + # In the `docker/distributed` directory + $ docker compose -f client/compose.yml down + +Shut down the Flower server components and delete the SuperLink state: + +.. code-block:: bash + + $ ssh <your-remote-machine> + $ cd <path-to-``distributed``-directory> + $ docker compose -f server/compose.yml down -v + +.. |quickstart_sklearn_tabular| replace:: ``examples/quickstart-sklearn-tabular`` + +.. _quickstart_sklearn_tabular: https://github.com/adap/flower/tree/main/examples/quickstart-sklearn-tabular diff --git a/doc/source/docker/tutorial-quickstart-docker-compose.rst b/doc/source/docker/tutorial-quickstart-docker-compose.rst index 7aeae1e2fb6b..bff3125c1b16 100644 --- a/doc/source/docker/tutorial-quickstart-docker-compose.rst +++ b/doc/source/docker/tutorial-quickstart-docker-compose.rst @@ -2,11 +2,12 @@ Quickstart with Docker Compose ============================== This quickstart shows you how to set up Flower using Docker Compose in a single command, -allowing you to focus on developing your application without worrying about the underlying -infrastructure. +allowing you to focus on developing your application without worrying about the +underlying infrastructure. -You will also learn how to easily enable TLS encryption and persist application state locally, -giving you the freedom to choose the configuration that best suits your project's needs. +You will also learn how to easily enable TLS encryption and persist application state +locally, giving you the freedom to choose the configuration that best suits your +project's needs. Prerequisites ------------- @@ -20,55 +21,56 @@ Before you start, make sure that: Step 1: Set Up -------------- -#. Clone the Docker Compose ``complete`` directory: +1. Clone the Docker Compose ``complete`` directory: .. code-block:: bash - $ git clone --depth=1 https://github.com/adap/flower.git _tmp \ - && mv _tmp/src/docker/complete . \ - && rm -rf _tmp && cd complete + $ git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/src/docker/complete . 
\ + && rm -rf _tmp && cd complete -#. Create a new Flower project (PyTorch): +2. Create a new Flower project (PyTorch): .. code-block:: bash - $ flwr new quickstart-compose --framework PyTorch --username flower + $ flwr new quickstart-compose --framework PyTorch --username flower -#. Export the path of the newly created project. The path should be relative to the location of the - Docker Compose files: +3. Export the path of the newly created project. The path should be relative to the + location of the Docker Compose files: .. code-block:: bash - $ export PROJECT_DIR=quickstart-compose + $ export PROJECT_DIR=quickstart-compose - Setting the ``PROJECT_DIR`` helps Docker Compose locate the ``pyproject.toml`` file, allowing - it to install dependencies in the SuperExec and SuperNode images correctly. + Setting the ``PROJECT_DIR`` helps Docker Compose locate the ``pyproject.toml`` file, + allowing it to install dependencies in the SuperExec and SuperNode images correctly. Step 2: Run Flower in Insecure Mode ----------------------------------- -To begin, start Flower with the most basic configuration. In this setup, Flower -will run without TLS and without persisting the state. +To begin, start Flower with the most basic configuration. In this setup, Flower will run +without TLS and without persisting the state. .. note:: - Without TLS, the data sent between the services remains **unencrypted**. Use it only for development - purposes. + Without TLS, the data sent between the services remains **unencrypted**. Use it only + for development purposes. - For production-oriented use cases, :ref:`enable TLS` for secure data transmission. + For production-oriented use cases, :ref:`enable TLS` for secure data + transmission. Open your terminal and run: .. code-block:: bash - $ docker compose -f compose.yml up --build -d + $ docker compose -f compose.yml up --build -d .. dropdown:: Understand the command - * ``docker compose``: The Docker command to run the Docker Compose tool. 
- * ``-f compose.yml``: Specify the YAML file that contains the basic Flower service definitions. - * ``--build``: Rebuild the images for each service if they don't already exist. - * ``-d``: Detach the containers from the terminal and run them in the background. + * ``docker compose``: The Docker command to run the Docker Compose tool. + * ``-f compose.yml``: Specify the YAML file that contains the basic Flower service definitions. + * ``--build``: Rebuild the images for each service if they don't already exist. + * ``-d``: Detach the containers from the terminal and run them in the background. Step 3: Run the Quickstart Project ---------------------------------- @@ -76,316 +78,321 @@ Step 3: Run the Quickstart Project Now that the Flower services have been started via Docker Compose, it is time to run the quickstart example. -To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify the SuperExec addresses -in the ``pyproject.toml`` file. +To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify the SuperExec +addresses in the ``pyproject.toml`` file. -#. Add the following lines to the ``quickstart-compose/pyproject.toml``: +1. Add the following lines to the ``quickstart-compose/pyproject.toml``: .. code-block:: toml - :caption: quickstart-compose/pyproject.toml + :caption: quickstart-compose/pyproject.toml - [tool.flwr.federations.docker-compose] - address = "127.0.0.1:9093" - insecure = true + [tool.flwr.federations.docker-compose] + address = "127.0.0.1:9093" + insecure = true -#. Execute the command to run the quickstart example: +2. Execute the command to run the quickstart example: .. code-block:: bash - $ flwr run quickstart-compose docker-compose + $ flwr run quickstart-compose docker-compose -#. Monitor the SuperExec logs and wait for the summary to appear: +3. Monitor the SuperExec logs and wait for the summary to appear: .. 
code-block:: bash - $ docker compose logs superexec -f + $ docker compose logs superexec -f Step 4: Update the Application ------------------------------ In the next step, change the application code. -#. For example, go to the ``task.py`` file in the ``quickstart-compose/quickstart_compose/`` - directory and add a ``print`` call in the ``get_weights`` function: +1. For example, go to the ``task.py`` file in the + ``quickstart-compose/quickstart_compose/`` directory and add a ``print`` call in the + ``get_weights`` function: .. code-block:: python - :caption: quickstart-compose/quickstart_compose/task.py + :caption: quickstart-compose/quickstart_compose/task.py - # ... - def get_weights(net): - print("Get weights") - return [val.cpu().numpy() for _, val in net.state_dict().items()] - # ... + # ... + def get_weights(net): + print("Get weights") + return [val.cpu().numpy() for _, val in net.state_dict().items()] -#. Rebuild and restart the services. + + # ... + +2. Rebuild and restart the services. .. note:: - If you have modified the dependencies listed in your ``pyproject.toml`` file, it is essential - to rebuild images. + If you have modified the dependencies listed in your ``pyproject.toml`` file, it + is essential to rebuild images. - If you haven't made any changes, you can skip this step. + If you haven't made any changes, you can skip this step. Run the following command to rebuild and restart the services: .. code-block:: bash - $ docker compose -f compose.yml up --build -d + $ docker compose -f compose.yml up --build -d -#. Run the updated quickstart example: +3. Run the updated quickstart example: .. code-block:: bash - $ flwr run quickstart-compose docker-compose - $ docker compose logs superexec -f + $ flwr run quickstart-compose docker-compose + $ docker compose logs superexec -f In the SuperExec logs, you should find the ``Get weights`` line: .. 
code-block:: - :emphasize-lines: 9 - - superexec-1 | INFO : Starting Flower SuperExec - superexec-1 | WARNING : Option `--insecure` was set. Starting insecure HTTP server. - superexec-1 | INFO : Starting Flower SuperExec gRPC server on 0.0.0.0:9093 - superexec-1 | INFO : ExecServicer.StartRun - superexec-1 | 🎊 Successfully installed quickstart-compose to /app/.flwr/apps/flower/quickstart-compose/1.0.0. - superexec-1 | INFO : Created run -6767165609169293507 - superexec-1 | INFO : Started run -6767165609169293507 - superexec-1 | WARNING : Option `--insecure` was set. Starting insecure HTTP client connected to superlink:9091. - superexec-1 | Get weights - superexec-1 | INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + :emphasize-lines: 9 + + superexec-1 | INFO : Starting Flower SuperExec + superexec-1 | WARNING : Option `--insecure` was set. Starting insecure HTTP server. + superexec-1 | INFO : Starting Flower SuperExec gRPC server on 0.0.0.0:9093 + superexec-1 | INFO : ExecServicer.StartRun + superexec-1 | 🎊 Successfully installed quickstart-compose to /app/.flwr/apps/flower/quickstart-compose/1.0.0. + superexec-1 | INFO : Created run -6767165609169293507 + superexec-1 | INFO : Started run -6767165609169293507 + superexec-1 | WARNING : Option `--insecure` was set. Starting insecure HTTP client connected to superlink:9091. + superexec-1 | Get weights + superexec-1 | INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout Step 5: Persisting the SuperLink State -------------------------------------- -In this step, Flower services are configured to persist the state of the SuperLink service, -ensuring that it maintains its state even after a restart. +In this step, Flower services are configured to persist the state of the SuperLink +service, ensuring that it maintains its state even after a restart. .. 
note:: - When working with Docker Compose on Linux, you may need to create the ``state`` directory first - and change its ownership to ensure proper access and permissions. + When working with Docker Compose on Linux, you may need to create the ``state`` + directory first and change its ownership to ensure proper access and permissions. For more information, consult the following page: :doc:`persist-superlink-state`. -#. Run the command: +1. Run the command: .. code-block:: bash - $ docker compose -f compose.yml -f with-state.yml up --build -d + $ docker compose -f compose.yml -f with-state.yml up --build -d .. dropdown:: Understand the command - * ``docker compose``: The Docker command to run the Docker Compose tool. - * ``-f compose.yml``: Specify the YAML file that contains the basic Flower service definitions. - * | ``-f with-state.yml``: Specifies the path to an additional Docker Compose file that - | contains the configuration for persisting the SuperLink state. - | - | Docker merges Compose files according to `merging rules `_. - * ``--build``: Rebuild the images for each service if they don't already exist. - * ``-d``: Detach the containers from the terminal and run them in the background. + * ``docker compose``: The Docker command to run the Docker Compose tool. + * ``-f compose.yml``: Specify the YAML file that contains the basic Flower service definitions. + * | ``-f with-state.yml``: Specifies the path to an additional Docker Compose file that + | contains the configuration for persisting the SuperLink state. + | + | Docker merges Compose files according to `merging rules `_. + * ``--build``: Rebuild the images for each service if they don't already exist. + * ``-d``: Detach the containers from the terminal and run them in the background. -#. Rerun the ``quickstart-compose`` project: +2. Rerun the ``quickstart-compose`` project: .. code-block:: bash - $ flwr run quickstart-compose docker-compose + $ flwr run quickstart-compose docker-compose -#. 
Check the content of the ``state`` directory: +3. Check the content of the ``state`` directory: .. code-block:: bash - $ ls state/ - state.db + $ ls state/ + state.db - You should see a ``state.db`` file in the ``state`` directory. If you restart the service, the - state file will be used to restore the state from the previously saved data. This ensures that - the data persists even if the containers are stopped and started again. + You should see a ``state.db`` file in the ``state`` directory. If you restart the + service, the state file will be used to restore the state from the previously saved + data. This ensures that the data persists even if the containers are stopped and + started again. -.. _TLS: +.. _tls: Step 6: Run Flower with TLS --------------------------- -#. To demonstrate how to enable TLS, generate self-signed certificates using the ``certs.yml`` - Compose file. +1. To demonstrate how to enable TLS, generate self-signed certificates using the + ``certs.yml`` Compose file. .. important:: - These certificates should be used only for development purposes. + These certificates should be used only for development purposes. - For production environments, use a service like `Let's Encrypt `_ - to obtain your certificates. + For production environments, use a service like `Let's Encrypt + `_ to obtain your certificates. Run the command: .. code-block:: bash - $ docker compose -f certs.yml up --build + $ docker compose -f certs.yml up --build -#. Add the following lines to the ``quickstart-compose/pyproject.toml``: +2. Add the following lines to the ``quickstart-compose/pyproject.toml``: .. 
code-block:: toml - :caption: quickstart-compose/pyproject.toml + :caption: quickstart-compose/pyproject.toml - [tool.flwr.federations.docker-compose-tls] - address = "127.0.0.1:9093" - root-certificates = "../superexec-certificates/ca.crt" + [tool.flwr.federations.docker-compose-tls] + address = "127.0.0.1:9093" + root-certificates = "../superexec-certificates/ca.crt" -#. Restart the services with TLS enabled: +3. Restart the services with TLS enabled: .. code-block:: bash - $ docker compose -f compose.yml -f with-tls.yml up --build -d + $ docker compose -f compose.yml -f with-tls.yml up --build -d -#. Rerun the ``quickstart-compose`` project: +4. Rerun the ``quickstart-compose`` project: .. code-block:: bash - $ flwr run quickstart-compose docker-compose-tls - $ docker compose logs superexec -f + $ flwr run quickstart-compose docker-compose-tls + $ docker compose logs superexec -f Step 7: Add another SuperNode ----------------------------- -You can add more SuperNodes and ClientApps by duplicating their definitions in the ``compose.yml`` -file. +You can add more SuperNodes and ClientApps by duplicating their definitions in the +``compose.yml`` file. -Just give each new SuperNode and ClientApp service a unique service name like ``supernode-3``, -``clientapp-3``, etc. +Just give each new SuperNode and ClientApp service a unique service name like +``supernode-3``, ``clientapp-3``, etc. In ``compose.yml``, add the following: .. 
code-block:: yaml - :caption: compose.yml - :substitutions: - - # other service definitions - - supernode-3: - image: flwr/supernode:${FLWR_VERSION:-|stable_flwr_version|} - command: - - --insecure - - --superlink - - superlink:9092 - - --supernode-address - - 0.0.0.0:9096 - - --isolation - - process - - --node-config - - "partition-id=1 num-partitions=2" - depends_on: - - superlink - - clientapp-3: - build: - context: ${PROJECT_DIR:-.} - dockerfile_inline: | - FROM flwr/clientapp:${FLWR_VERSION:-|stable_flwr_version|} - - USER root - RUN apt-get update \ - && apt-get -y --no-install-recommends install \ - build-essential \ - && rm -rf /var/lib/apt/lists/* - USER app - - WORKDIR /app - COPY --chown=app:app pyproject.toml . - RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ - && python -m pip install -U --no-cache-dir . - - ENTRYPOINT ["flwr-clientapp"] - command: - - --supernode - - supernode-3:9096 - deploy: - resources: - limits: - cpus: "2" - stop_signal: SIGINT - depends_on: - - supernode-3 - -If you also want to enable TLS for the new SuperNodes, duplicate the SuperNode definition for -each new SuperNode service in the ``with-tls.yml`` file. + :caption: compose.yml + :substitutions: + + # other service definitions + + supernode-3: + image: flwr/supernode:${FLWR_VERSION:-|stable_flwr_version|} + command: + - --insecure + - --superlink + - superlink:9092 + - --supernode-address + - 0.0.0.0:9096 + - --isolation + - process + - --node-config + - "partition-id=1 num-partitions=2" + depends_on: + - superlink + + clientapp-3: + build: + context: ${PROJECT_DIR:-.} + dockerfile_inline: | + FROM flwr/clientapp:${FLWR_VERSION:-|stable_flwr_version|} + + USER root + RUN apt-get update \ + && apt-get -y --no-install-recommends install \ + build-essential \ + && rm -rf /var/lib/apt/lists/* + USER app + + WORKDIR /app + COPY --chown=app:app pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . 
+ + ENTRYPOINT ["flwr-clientapp"] + command: + - --supernode + - supernode-3:9096 + deploy: + resources: + limits: + cpus: "2" + stop_signal: SIGINT + depends_on: + - supernode-3 + +If you also want to enable TLS for the new SuperNodes, duplicate the SuperNode +definition for each new SuperNode service in the ``with-tls.yml`` file. Make sure that the names of the services match with the one in the ``compose.yml`` file. In ``with-tls.yml``, add the following: .. code-block:: yaml - :caption: with-tls.yml - - # other service definitions - - supernode-3: - command: - - --superlink - - superlink:9092 - - --supernode-address - - 0.0.0.0:9096 - - --isolation - - process - - --node-config - - "partition-id=1 num-partitions=2" - - --root-certificates - - certificates/ca.crt - secrets: - - source: superlink-ca-certfile - target: /app/certificates/ca.crt + :caption: with-tls.yml + + # other service definitions + + supernode-3: + command: + - --superlink + - superlink:9092 + - --supernode-address + - 0.0.0.0:9096 + - --isolation + - process + - --node-config + - "partition-id=1 num-partitions=2" + - --root-certificates + - certificates/ca.crt + secrets: + - source: superlink-ca-certfile + target: /app/certificates/ca.crt Step 8: Persisting the SuperLink State and Enabling TLS ------------------------------------------------------- -To run Flower with persisted SuperLink state and enabled TLS, a slight change in the ``with-state.yml`` -file is required: +To run Flower with persisted SuperLink state and enabled TLS, a slight change in the +``with-state.yml`` file is required: -#. Comment out the lines 2-4 and uncomment the lines 5-9: +1. Comment out the lines 2-4 and uncomment the lines 5-9: .. 
code-block:: yaml - :caption: with-state.yml - :linenos: - :emphasize-lines: 2-9 - - superlink: - # command: - # - --insecure - # - --database=state/state.db - command: - - --ssl-ca-certfile=certificates/ca.crt - - --ssl-certfile=certificates/server.pem - - --ssl-keyfile=certificates/server.key - - --database=state/state.db - volumes: - - ./state/:/app/state/:rw - -#. Restart the services: + :caption: with-state.yml + :linenos: + :emphasize-lines: 2-9 + + superlink: + # command: + # - --insecure + # - --database=state/state.db + command: + - --ssl-ca-certfile=certificates/ca.crt + - --ssl-certfile=certificates/server.pem + - --ssl-keyfile=certificates/server.key + - --database=state/state.db + volumes: + - ./state/:/app/state/:rw + +2. Restart the services: .. code-block:: bash - $ docker compose -f compose.yml -f with-tls.yml -f with-state.yml up --build -d + $ docker compose -f compose.yml -f with-tls.yml -f with-state.yml up --build -d -#. Rerun the ``quickstart-compose`` project: +3. Rerun the ``quickstart-compose`` project: .. code-block:: bash - $ flwr run quickstart-compose docker-compose-tls - $ docker compose logs superexec -f + $ flwr run quickstart-compose docker-compose-tls + $ docker compose logs superexec -f Step 9: Merge Multiple Compose Files ------------------------------------ -You can merge multiple Compose files into a single file. For instance, if you wish to combine -the basic configuration with the TLS configuration, execute the following command: +You can merge multiple Compose files into a single file. For instance, if you wish to +combine the basic configuration with the TLS configuration, execute the following +command: .. 
code-block:: bash - $ docker compose -f compose.yml \ - -f with-tls.yml config --no-path-resolution > my_compose.yml + $ docker compose -f compose.yml \ + -f with-tls.yml config --no-path-resolution > my_compose.yml -This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into a new file called -``my_compose.yml``. +This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into a new file +called ``my_compose.yml``. Step 10: Clean Up ----------------- @@ -394,10 +401,10 @@ Remove all services and volumes: .. code-block:: bash - $ docker compose down -v - $ docker compose -f certs.yml down -v + $ docker compose down -v + $ docker compose -f certs.yml down -v Where to Go Next ---------------- -* :doc:`run-quickstart-examples-docker-compose` +- :doc:`run-quickstart-examples-docker-compose` diff --git a/doc/source/docker/tutorial-quickstart-docker.rst b/doc/source/docker/tutorial-quickstart-docker.rst index 189d019cb097..993754dcf109 100644 --- a/doc/source/docker/tutorial-quickstart-docker.rst +++ b/doc/source/docker/tutorial-quickstart-docker.rst @@ -1,11 +1,11 @@ Quickstart with Docker ====================== -This quickstart aims to guide you through the process of containerizing a Flower project and -running it end to end using Docker on your local machine. +This quickstart aims to guide you through the process of containerizing a Flower project +and running it end to end using Docker on your local machine. -This tutorial does not use production-ready settings, so you can focus on understanding the basic -workflow that uses the minimum configurations. +This tutorial does not use production-ready settings, so you can focus on understanding +the basic workflow that uses the minimum configurations. Prerequisites ------------- @@ -18,33 +18,33 @@ Before you start, make sure that: Step 1: Set Up -------------- -#. Create a new Flower project (PyTorch): +1. Create a new Flower project (PyTorch): .. 
code-block:: bash - $ flwr new quickstart-docker --framework PyTorch --username flower + $ flwr new quickstart-docker --framework PyTorch --username flower - 🔨 Creating Flower project quickstart-docker... - 🎊 Project creation successful. + 🔨 Creating Flower project quickstart-docker... + 🎊 Project creation successful. - Use the following command to run your project: + Use the following command to run your project: - cd quickstart-docker - pip install -e . - flwr run + cd quickstart-docker + pip install -e . + flwr run - $ cd quickstart-docker - $ pip install -e . + $ cd quickstart-docker + $ pip install -e . -#. Create a new Docker bridge network called ``flwr-network``: +2. Create a new Docker bridge network called ``flwr-network``: .. code-block:: bash - $ docker network create --driver bridge flwr-network + $ docker network create --driver bridge flwr-network - User-defined networks, such as ``flwr-network``, enable IP resolution of container names, a feature - absent in the default bridge network. This simplifies quickstart example by avoiding the need to - determine host IP first. + User-defined networks, such as ``flwr-network``, enable IP resolution of container + names, a feature absent in the default bridge network. This simplifies quickstart + example by avoiding the need to determine host IP first. Step 2: Start the SuperLink --------------------------- @@ -52,334 +52,339 @@ Step 2: Start the SuperLink Open your terminal and run: .. code-block:: bash - :substitutions: + :substitutions: - $ docker run --rm \ - -p 9091:9091 -p 9092:9092 \ - --network flwr-network \ - --name superlink \ - --detach \ - flwr/superlink:|stable_flwr_version| --insecure + $ docker run --rm \ + -p 9091:9091 -p 9092:9092 \ + --network flwr-network \ + --name superlink \ + --detach \ + flwr/superlink:|stable_flwr_version| --insecure .. dropdown:: Understand the command - * ``docker run``: This tells Docker to run a container from an image. 
- * ``--rm``: Remove the container once it is stopped or the command exits. - * | ``-p 9091:9091 -p 9092:9092``: Map port ``9091`` and ``9092`` of the container to the same port of - | the host machine, allowing other services to access the Driver API on - | ``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``. - * ``--network flwr-network``: Make the container join the network named ``flwr-network``. - * ``--name superlink``: Assign the name ``superlink`` to the container. - * ``--detach``: Run the container in the background, freeing up the terminal. - * | :substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of the image to be run and the specific - | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a :doc:`specific version ` of the image. - * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing - | unencrypted communication. + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * | ``-p 9091:9091 -p 9092:9092``: Map port ``9091`` and ``9092`` of the container to the same port of + | the host machine, allowing other services to access the Driver API on + | ``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``. + * ``--network flwr-network``: Make the container join the network named ``flwr-network``. + * ``--name superlink``: Assign the name ``superlink`` to the container. + * ``--detach``: Run the container in the background, freeing up the terminal. + * | :substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of the image to be run and the specific + | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a :doc:`specific version ` of the image. + * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing + | unencrypted communication. 
Step 3: Start the SuperNode --------------------------- Start two SuperNode containers. -#. Start the first container: +1. Start the first container: .. code-block:: bash - :substitutions: - - $ docker run --rm \ - -p 9094:9094 \ - --network flwr-network \ - --name supernode-1 \ - --detach \ - flwr/supernode:|stable_flwr_version| \ - --insecure \ - --superlink superlink:9092 \ - --node-config "partition-id=0 num-partitions=2" \ - --supernode-address 0.0.0.0:9094 \ - --isolation process + :substitutions: + + $ docker run --rm \ + -p 9094:9094 \ + --network flwr-network \ + --name supernode-1 \ + --detach \ + flwr/supernode:|stable_flwr_version| \ + --insecure \ + --superlink superlink:9092 \ + --node-config "partition-id=0 num-partitions=2" \ + --supernode-address 0.0.0.0:9094 \ + --isolation process .. dropdown:: Understand the command - * ``docker run``: This tells Docker to run a container from an image. - * ``--rm``: Remove the container once it is stopped or the command exits. - * | ``-p 9094:9094``: Map port ``9094`` of the container to the same port of - | the host machine, allowing other services to access the SuperNode API on - | ``http://localhost:9094``. - * ``--network flwr-network``: Make the container join the network named ``flwr-network``. - * ``--name supernode-1``: Assign the name ``supernode-1`` to the container. - * ``--detach``: Run the container in the background, freeing up the terminal. - * | ``flwr/supernode:|stable_flwr_version|``: This is the name of the image to be run and the specific tag - | of the image. - * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing - | unencrypted communication. - * | ``--superlink superlink:9092``: Connect to the SuperLink's Fleet API at the address - | ``superlink:9092``. - * | ``--node-config "partition-id=0 num-partitions=2"``: Set the partition ID to ``0`` and the - | number of partitions to ``2`` for the SuperNode configuration. 
- * | ``--supernode-address 0.0.0.0:9094``: Set the address and port number that the SuperNode - | is listening on. - * | ``--isolation process``: Tells the SuperNode that the ClientApp is created by separate - | independent process. The SuperNode does not attempt to create it. - -#. Start the second container: + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * | ``-p 9094:9094``: Map port ``9094`` of the container to the same port of + | the host machine, allowing other services to access the SuperNode API on + | ``http://localhost:9094``. + * ``--network flwr-network``: Make the container join the network named ``flwr-network``. + * ``--name supernode-1``: Assign the name ``supernode-1`` to the container. + * ``--detach``: Run the container in the background, freeing up the terminal. + * | ``flwr/supernode:|stable_flwr_version|``: This is the name of the image to be run and the specific tag + | of the image. + * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing + | unencrypted communication. + * | ``--superlink superlink:9092``: Connect to the SuperLink's Fleet API at the address + | ``superlink:9092``. + * | ``--node-config "partition-id=0 num-partitions=2"``: Set the partition ID to ``0`` and the + | number of partitions to ``2`` for the SuperNode configuration. + * | ``--supernode-address 0.0.0.0:9094``: Set the address and port number that the SuperNode + | is listening on. + * | ``--isolation process``: Tells the SuperNode that the ClientApp is created by separate + | independent process. The SuperNode does not attempt to create it. + +2. Start the second container: .. 
code-block:: shell - :substitutions: - - $ docker run --rm \ - -p 9095:9095 \ - --network flwr-network \ - --name supernode-2 \ - --detach \ - flwr/supernode:|stable_flwr_version| \ - --insecure \ - --superlink superlink:9092 \ - --node-config "partition-id=1 num-partitions=2" \ - --supernode-address 0.0.0.0:9095 \ - --isolation process + :substitutions: + + $ docker run --rm \ + -p 9095:9095 \ + --network flwr-network \ + --name supernode-2 \ + --detach \ + flwr/supernode:|stable_flwr_version| \ + --insecure \ + --superlink superlink:9092 \ + --node-config "partition-id=1 num-partitions=2" \ + --supernode-address 0.0.0.0:9095 \ + --isolation process Step 4: Start the ClientApp --------------------------- -The ClientApp Docker image comes with a pre-installed version of Flower and serves as a base for -building your own ClientApp image. In order to install the FAB dependencies, you will need to create -a Dockerfile that extends the ClientApp image and installs the required dependencies. +The ClientApp Docker image comes with a pre-installed version of Flower and serves as a +base for building your own ClientApp image. In order to install the FAB dependencies, +you will need to create a Dockerfile that extends the ClientApp image and installs the +required dependencies. -#. Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste the following code into it: +1. Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste the following + code into it: .. code-block:: dockerfile - :caption: Dockerfile.clientapp - :linenos: - :substitutions: + :caption: Dockerfile.clientapp + :linenos: + :substitutions: - FROM flwr/clientapp:|stable_flwr_version| + FROM flwr/clientapp:|stable_flwr_version| - WORKDIR /app - COPY pyproject.toml . - RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ - && python -m pip install -U --no-cache-dir . + WORKDIR /app + COPY pyproject.toml . 
+ RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . - ENTRYPOINT ["flwr-clientapp"] + ENTRYPOINT ["flwr-clientapp"] .. dropdown:: Understand the Dockerfile - * | :substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line specifies that the Docker image - | to be built from is the ``flwr/clientapp image``, version :substitution-code:`|stable_flwr_version|`. - * | ``WORKDIR /app``: Set the working directory for the container to ``/app``. - | Any subsequent commands that reference a directory will be relative to this directory. - * | ``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file - | from the current working directory into the container's ``/app`` directory. - * | ``RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml``: Remove the ``flwr`` dependency - | from the ``pyproject.toml``. - * | ``python -m pip install -U --no-cache-dir .``: Run the ``pip`` install command to - | install the dependencies defined in the ``pyproject.toml`` file - | - | The ``-U`` flag indicates that any existing packages should be upgraded, and - | ``--no-cache-dir`` prevents pip from using the cache to speed up the installation. - * | ``ENTRYPOINT ["flwr-clientapp"]``: Set the command ``flwr-clientapp`` to be - | the default command run when the container is started. + * | :substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line specifies that the Docker image + | to be built from is the ``flwr/clientapp image``, version :substitution-code:`|stable_flwr_version|`. + * | ``WORKDIR /app``: Set the working directory for the container to ``/app``. + | Any subsequent commands that reference a directory will be relative to this directory. + * | ``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file + | from the current working directory into the container's ``/app`` directory. 
+ * | ``RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml``: Remove the ``flwr`` dependency + | from the ``pyproject.toml``. + * | ``python -m pip install -U --no-cache-dir .``: Run the ``pip`` install command to + | install the dependencies defined in the ``pyproject.toml`` file + | + | The ``-U`` flag indicates that any existing packages should be upgraded, and + | ``--no-cache-dir`` prevents pip from using the cache to speed up the installation. + * | ``ENTRYPOINT ["flwr-clientapp"]``: Set the command ``flwr-clientapp`` to be + | the default command run when the container is started. .. important:: - Note that `flwr `__ is already installed in the ``flwr/clientapp`` - base image, so only other package dependencies such as ``flwr-datasets``, ``torch``, etc., - need to be installed. As a result, the ``flwr`` dependency is removed from the - ``pyproject.toml`` after it has been copied into the Docker image (see line 5). + Note that `flwr `__ is already installed in the + ``flwr/clientapp`` base image, so only other package dependencies such as + ``flwr-datasets``, ``torch``, etc., need to be installed. As a result, the + ``flwr`` dependency is removed from the ``pyproject.toml`` after it has been + copied into the Docker image (see line 5). -#. Next, build the ClientApp Docker image by running the following command in the directory where - the Dockerfile is located: +2. Next, build the ClientApp Docker image by running the following command in the + directory where the Dockerfile is located: .. code-block:: bash - $ docker build -f Dockerfile.clientapp -t flwr_clientapp:0.0.1 . + $ docker build -f Dockerfile.clientapp -t flwr_clientapp:0.0.1 . .. note:: - The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. Remember that - these values are merely examples, and you can customize them according to your requirements. + The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. 
Remember + that these values are merely examples, and you can customize them according to + your requirements. -#. Start the first ClientApp container: +3. Start the first ClientApp container: .. code-block:: bash - $ docker run --rm \ - --network flwr-network \ - --detach \ - flwr_clientapp:0.0.1 \ - --supernode supernode-1:9094 + $ docker run --rm \ + --network flwr-network \ + --detach \ + flwr_clientapp:0.0.1 \ + --supernode supernode-1:9094 .. dropdown:: Understand the command - * ``docker run``: This tells Docker to run a container from an image. - * ``--rm``: Remove the container once it is stopped or the command exits. - * ``--network flwr-network``: Make the container join the network named ``flwr-network``. - * ``--detach``: Run the container in the background, freeing up the terminal. - * | ``flwr_clientapp:0.0.1``: This is the name of the image to be run and the specific tag - | of the image. - * | ``--supernode supernode-1:9094``: Connect to the SuperNode's Fleet API at the address - | ``supernode-1:9094``. + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * ``--network flwr-network``: Make the container join the network named ``flwr-network``. + * ``--detach``: Run the container in the background, freeing up the terminal. + * | ``flwr_clientapp:0.0.1``: This is the name of the image to be run and the specific tag + | of the image. + * | ``--supernode supernode-1:9094``: Connect to the SuperNode's Fleet API at the address + | ``supernode-1:9094``. -#. Start the second ClientApp container: +4. Start the second ClientApp container: .. 
code-block:: shell - $ docker run --rm \ - --network flwr-network \ - --detach \ - flwr_clientapp:0.0.1 \ - --supernode supernode-2:9095 + $ docker run --rm \ + --network flwr-network \ + --detach \ + flwr_clientapp:0.0.1 \ + --supernode supernode-2:9095 Step 5: Start the SuperExec --------------------------- -The procedure for building and running a SuperExec image is almost identical to the ClientApp image. +The procedure for building and running a SuperExec image is almost identical to the +ClientApp image. -Similar to the ClientApp image, you will need to create a Dockerfile that extends the SuperExec -image and installs the required FAB dependencies. +Similar to the ClientApp image, you will need to create a Dockerfile that extends the +SuperExec image and installs the required FAB dependencies. -#. Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste the following code in: +1. Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste the following + code in: .. code-block:: dockerfile - :caption: Dockerfile.superexec - :substitutions: + :caption: Dockerfile.superexec + :substitutions: - FROM flwr/superexec:|stable_flwr_version| + FROM flwr/superexec:|stable_flwr_version| - WORKDIR /app + WORKDIR /app - COPY pyproject.toml . - RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ - && python -m pip install -U --no-cache-dir . + COPY pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . - ENTRYPOINT ["flower-superexec", "--executor", "flwr.superexec.deployment:executor"] + ENTRYPOINT ["flower-superexec", "--executor", "flwr.superexec.deployment:executor"] .. dropdown:: Understand the Dockerfile - * | :substitution-code:`FROM flwr/superexec:|stable_flwr_version|`: This line specifies that the Docker image - | to be built from is the ``flwr/superexec image``, version :substitution-code:`|stable_flwr_version|`. 
- * | ``WORKDIR /app``: Set the working directory for the container to ``/app``. - | Any subsequent commands that reference a directory will be relative to this directory. - * | ``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file - | from the current working directory into the container's ``/app`` directory. - * | ``RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml``: Remove the ``flwr`` dependency - | from the ``pyproject.toml``. - * | ``python -m pip install -U --no-cache-dir .``: Run the ``pip`` install command to - | install the dependencies defined in the ``pyproject.toml`` file - | - | The ``-U`` flag indicates that any existing packages should be upgraded, and - | ``--no-cache-dir`` prevents pip from using the cache to speed up the installation. - * | ``ENTRYPOINT ["flower-superexec"``: Set the command ``flower-superexec`` to be - | the default command run when the container is started. - | - | ``"--executor", "flwr.superexec.deployment:executor"]`` Use the - | ``flwr.superexec.deployment:executor`` executor to run the ServerApps. - -#. Afterward, in the directory that holds the Dockerfile, execute this Docker command to + * | :substitution-code:`FROM flwr/superexec:|stable_flwr_version|`: This line specifies that the Docker image + | to be built from is the ``flwr/superexec image``, version :substitution-code:`|stable_flwr_version|`. + * | ``WORKDIR /app``: Set the working directory for the container to ``/app``. + | Any subsequent commands that reference a directory will be relative to this directory. + * | ``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file + | from the current working directory into the container's ``/app`` directory. + * | ``RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml``: Remove the ``flwr`` dependency + | from the ``pyproject.toml``. 
+ * | ``python -m pip install -U --no-cache-dir .``: Run the ``pip`` install command to + | install the dependencies defined in the ``pyproject.toml`` file + | + | The ``-U`` flag indicates that any existing packages should be upgraded, and + | ``--no-cache-dir`` prevents pip from using the cache to speed up the installation. + * | ``ENTRYPOINT ["flower-superexec"``: Set the command ``flower-superexec`` to be + | the default command run when the container is started. + | + | ``"--executor", "flwr.superexec.deployment:executor"]`` Use the + | ``flwr.superexec.deployment:executor`` executor to run the ServerApps. + +2. Afterward, in the directory that holds the Dockerfile, execute this Docker command to build the SuperExec image: .. code-block:: bash - $ docker build -f Dockerfile.superexec -t flwr_superexec:0.0.1 . + $ docker build -f Dockerfile.superexec -t flwr_superexec:0.0.1 . - -#. Start the SuperExec container: +3. Start the SuperExec container: .. code-block:: bash - $ docker run --rm \ - -p 9093:9093 \ - --network flwr-network \ - --name superexec \ - --detach \ - flwr_superexec:0.0.1 \ - --insecure \ - --executor-config superlink=\"superlink:9091\" + $ docker run --rm \ + -p 9093:9093 \ + --network flwr-network \ + --name superexec \ + --detach \ + flwr_superexec:0.0.1 \ + --insecure \ + --executor-config superlink=\"superlink:9091\" .. dropdown:: Understand the command - * ``docker run``: This tells Docker to run a container from an image. - * ``--rm``: Remove the container once it is stopped or the command exits. - * | ``-p 9093:9093``: Map port ``9093`` of the container to the same port of - | the host machine, allowing you to access the SuperExec API on ``http://localhost:9093``. - * ``--network flwr-network``: Make the container join the network named ``flwr-network``. - * ``--name superexec``: Assign the name ``superexec`` to the container. - * ``--detach``: Run the container in the background, freeing up the terminal. 
- * | ``flwr_superexec:0.0.1``: This is the name of the image to be run and the specific tag - | of the image. - * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing - | unencrypted communication. - * | ``--executor-config superlink=\"superlink:9091\"``: Configure the SuperExec executor to - | connect to the SuperLink running on port ``9091``. + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * | ``-p 9093:9093``: Map port ``9093`` of the container to the same port of + | the host machine, allowing you to access the SuperExec API on ``http://localhost:9093``. + * ``--network flwr-network``: Make the container join the network named ``flwr-network``. + * ``--name superexec``: Assign the name ``superexec`` to the container. + * ``--detach``: Run the container in the background, freeing up the terminal. + * | ``flwr_superexec:0.0.1``: This is the name of the image to be run and the specific tag + | of the image. + * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing + | unencrypted communication. + * | ``--executor-config superlink=\"superlink:9091\"``: Configure the SuperExec executor to + | connect to the SuperLink running on port ``9091``. Step 6: Run the Quickstart Project ---------------------------------- -#. Add the following lines to the ``pyproject.toml``: +1. Add the following lines to the ``pyproject.toml``: .. code-block:: toml - :caption: pyproject.toml + :caption: pyproject.toml - [tool.flwr.federations.docker] - address = "127.0.0.1:9093" - insecure = true + [tool.flwr.federations.docker] + address = "127.0.0.1:9093" + insecure = true -#. Run the ``quickstart-docker`` project by executing the command: +2. Run the ``quickstart-docker`` project by executing the command: .. code-block:: bash - $ flwr run . docker + $ flwr run . docker -#. 
Follow the SuperExec logs to track the execution of the run: +3. Follow the SuperExec logs to track the execution of the run: .. code-block:: bash - $ docker logs -f superexec + $ docker logs -f superexec Step 7: Update the Application ------------------------------ -#. Change the application code. For example, change the ``seed`` in ``quickstart_docker/task.py`` - to ``43`` and save it: +1. Change the application code. For example, change the ``seed`` in + ``quickstart_docker/task.py`` to ``43`` and save it: .. code-block:: python - :caption: quickstart_docker/task.py + :caption: quickstart_docker/task.py - # ... - partition_train_test = partition.train_test_split(test_size=0.2, seed=43) - # ... + # ... + partition_train_test = partition.train_test_split(test_size=0.2, seed=43) + # ... -#. Stop the current ClientApp containers: +2. Stop the current ClientApp containers: .. code-block:: bash - $ docker stop $(docker ps -a -q --filter ancestor=flwr_clientapp:0.0.1) + $ docker stop $(docker ps -a -q --filter ancestor=flwr_clientapp:0.0.1) -#. Rebuild the FAB and ClientApp image: +3. Rebuild the FAB and ClientApp image: .. code-block:: bash - $ docker build -f Dockerfile.clientapp -t flwr_clientapp:0.0.1 . + $ docker build -f Dockerfile.clientapp -t flwr_clientapp:0.0.1 . -#. Launch two new ClientApp containers based on the newly built image: +4. Launch two new ClientApp containers based on the newly built image: .. code-block:: bash - $ docker run --rm \ - --network flwr-network \ - --detach \ - flwr_clientapp:0.0.1 \ - --supernode supernode-1:9094 - $ docker run --rm \ - --network flwr-network \ - --detach \ - flwr_clientapp:0.0.1 \ - --supernode supernode-2:9095 + $ docker run --rm \ + --network flwr-network \ + --detach \ + flwr_clientapp:0.0.1 \ + --supernode supernode-1:9094 + $ docker run --rm \ + --network flwr-network \ + --detach \ + flwr_clientapp:0.0.1 \ + --supernode supernode-2:9095 -#. Run the updated project: +5. Run the updated project: .. 
code-block:: bash - $ flwr run . docker + $ flwr run . docker Step 8: Clean Up ---------------- @@ -388,16 +393,16 @@ Remove the containers and the bridge network: .. code-block:: bash - $ docker stop $(docker ps -a -q --filter ancestor=flwr_clientapp:0.0.1) \ - supernode-1 \ - supernode-2 \ - superexec \ - superlink - $ docker network rm flwr-network + $ docker stop $(docker ps -a -q --filter ancestor=flwr_clientapp:0.0.1) \ + supernode-1 \ + supernode-2 \ + superexec \ + superlink + $ docker network rm flwr-network Where to Go Next ---------------- -* :doc:`enable-tls` -* :doc:`persist-superlink-state` -* :doc:`tutorial-quickstart-docker-compose` +- :doc:`enable-tls` +- :doc:`persist-superlink-state` +- :doc:`tutorial-quickstart-docker-compose` diff --git a/doc/source/docker/use-a-different-version.rst b/doc/source/docker/use-a-different-version.rst index 73e5f4218663..9108f5157dcd 100644 --- a/doc/source/docker/use-a-different-version.rst +++ b/doc/source/docker/use-a-different-version.rst @@ -1,12 +1,13 @@ Use a Different Flower Version ============================== -If you want to use a different version of Flower, for example Flower nightly, you can do so by -changing the tag. All available versions are on `Docker Hub `__. +If you want to use a different version of Flower, for example Flower nightly, you can do +so by changing the tag. All available versions are on `Docker Hub +`__. .. important:: - When using Flower nightly, the SuperLink nightly image must be paired with the corresponding - SuperNode and ServerApp nightly images released on the same day. To ensure the versions are - in sync, using the concrete tag, e.g., ``1.10.0.dev20240610`` instead of ``nightly`` is - recommended. + When using Flower nightly, the SuperLink nightly image must be paired with the + corresponding SuperNode and ServerApp nightly images released on the same day. 
To + ensure the versions are in sync, using the concrete tag, e.g., + ``1.10.0.dev20240610`` instead of ``nightly`` is recommended. diff --git a/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst b/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst index 0139f3b8dc31..4a9d4607d9a5 100644 --- a/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst +++ b/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst @@ -1,16 +1,22 @@ Example: FedBN in PyTorch - From Centralized To Federated ========================================================= -This tutorial will show you how to use Flower to build a federated version of an existing machine learning workload with `FedBN `_, a federated training strategy designed for non-iid data. -We are using PyTorch to train a Convolutional Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. -When applying FedBN, only few changes needed compared to :doc:`Example: PyTorch - From Centralized To Federated `. +This tutorial will show you how to use Flower to build a federated version of an +existing machine learning workload with `FedBN `_, a +federated training strategy designed for non-iid data. We are using PyTorch to train a +Convolutional Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. +When applying FedBN, only few changes needed compared to :doc:`Example: PyTorch - From +Centralized To Federated `. Centralized Training -------------------- -All files are revised based on :doc:`Example: PyTorch - From Centralized To Federated `. -The only thing to do is modifying the file called :code:`cifar.py`, revised part is shown below: -The model architecture defined in class Net() is added with Batch Normalization layers accordingly. +All files are revised based on :doc:`Example: PyTorch - From Centralized To Federated +`. 
The only thing to do is modifying the +file called ``cifar.py``, revised part is shown below: + +The model architecture defined in class Net() is added with Batch Normalization layers +accordingly. .. code-block:: python @@ -40,26 +46,33 @@ The model architecture defined in class Net() is added with Batch Normalization You can now run your machine learning workload: -.. code-block:: python +.. code-block:: bash python3 cifar.py -So far this should all look fairly familiar if you've used PyTorch before. -Let's take the next step and use what we've built to create a federated learning system within FedBN, the system consists of one server and two clients. +So far this should all look fairly familiar if you've used PyTorch before. Let's take +the next step and use what we've built to create a federated learning system within +FedBN, the system consists of one server and two clients. Federated Training ------------------ -If you have read :doc:`Example: PyTorch - From Centralized To Federated `, the following parts are easy to follow, only :code:`get_parameters` and :code:`set_parameters` function in :code:`client.py` needed to revise. -If not, please read the :doc:`Example: PyTorch - From Centralized To Federated `. first. +If you have read :doc:`Example: PyTorch - From Centralized To Federated +`, the following parts are easy to +follow, only ``get_parameters`` and ``set_parameters`` function in ``client.py`` needed +to revise. If not, please read the :doc:`Example: PyTorch - From Centralized To +Federated `. first. -Our example consists of one *server* and two *clients*. In FedBN, :code:`server.py` keeps unchanged, we can start the server directly. +Our example consists of one *server* and two *clients*. In FedBN, ``server.py`` keeps +unchanged, we can start the server directly. -.. code-block:: python +.. 
code-block:: bash python3 server.py -Finally, we will revise our *client* logic by changing :code:`get_parameters` and :code:`set_parameters` in :code:`client.py`, we will exclude batch normalization parameters from model parameter list when sending to or receiving from the server. +Finally, we will revise our *client* logic by changing ``get_parameters`` and +``set_parameters`` in ``client.py``, we will exclude batch normalization parameters from +model parameter list when sending to or receiving from the server. .. code-block:: python @@ -71,11 +84,15 @@ Finally, we will revise our *client* logic by changing :code:`get_parameters` an def get_parameters(self, config) -> List[np.ndarray]: # Return model parameters as a list of NumPy ndarrays, excluding parameters of BN layers when using FedBN - return [val.cpu().numpy() for name, val in self.model.state_dict().items() if 'bn' not in name] + return [ + val.cpu().numpy() + for name, val in self.model.state_dict().items() + if "bn" not in name + ] def set_parameters(self, parameters: List[np.ndarray]) -> None: # Set model parameters from a list of NumPy ndarrays - keys = [k for k in self.model.state_dict().keys() if 'bn' not in k] + keys = [k for k in self.model.state_dict().keys() if "bn" not in k] params_dict = zip(keys, parameters) state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) self.model.load_state_dict(state_dict, strict=False) @@ -84,15 +101,20 @@ Finally, we will revise our *client* logic by changing :code:`get_parameters` an Now, you can now open two additional terminal windows and run -.. code-block:: python +.. code-block:: bash python3 client.py -in each window (make sure that the server is still running before you do so) and see your (previously centralized) PyTorch project run federated learning with FedBN strategy across two clients. Congratulations! 
+in each window (make sure that the server is still running before you do so) and see +your (previously centralized) PyTorch project run federated learning with FedBN strategy +across two clients. Congratulations! Next Steps ---------- -The full source code for this example can be found `here `_. -Our example is of course somewhat over-simplified because both clients load the exact same dataset, which isn't realistic. -You're now prepared to explore this topic further. How about using different subsets of CIFAR-10 on each client? How about adding more clients? +The full source code for this example can be found `here +`_. +Our example is of course somewhat over-simplified because both clients load the exact +same dataset, which isn't realistic. You're now prepared to explore this topic further. +How about using different subsets of CIFAR-10 on each client? How about adding more +clients? diff --git a/doc/source/example-pytorch-from-centralized-to-federated.rst b/doc/source/example-pytorch-from-centralized-to-federated.rst index 0c458a136a81..9629a7fed6e8 100644 --- a/doc/source/example-pytorch-from-centralized-to-federated.rst +++ b/doc/source/example-pytorch-from-centralized-to-federated.rst @@ -1,21 +1,25 @@ Example: PyTorch - From Centralized To Federated ================================================ -This tutorial will show you how to use Flower to build a federated version of an existing machine learning workload. -We are using PyTorch to train a Convolutional Neural Network on the CIFAR-10 dataset. -First, we introduce this machine learning task with a centralized training approach based on the `Deep Learning with PyTorch `_ tutorial. -Then, we build upon the centralized training code to run the training in a federated fashion. +This tutorial will show you how to use Flower to build a federated version of an +existing machine learning workload. We are using PyTorch to train a Convolutional Neural +Network on the CIFAR-10 dataset. 
First, we introduce this machine learning task with a +centralized training approach based on the `Deep Learning with PyTorch +`_ tutorial. Then, +we build upon the centralized training code to run the training in a federated fashion. Centralized Training -------------------- -We begin with a brief description of the centralized CNN training code. -If you want a more in-depth explanation of what's going on then have a look at the official `PyTorch tutorial `_. +We begin with a brief description of the centralized CNN training code. If you want a +more in-depth explanation of what's going on then have a look at the official `PyTorch +tutorial `_. -Let's create a new file called :code:`cifar.py` with all the components required for a traditional (centralized) training on CIFAR-10. -First, all required packages (such as :code:`torch` and :code:`torchvision`) need to be imported. -You can see that we do not import any package for federated learning. -You can keep all these imports as they are even when we add the federated learning components at a later point. +Let's create a new file called ``cifar.py`` with all the components required for a +traditional (centralized) training on CIFAR-10. First, all required packages (such as +``torch`` and ``torchvision``) need to be imported. You can see that we do not import +any package for federated learning. You can keep all these imports as they are even when +we add the federated learning components at a later point. .. code-block:: python @@ -29,7 +33,9 @@ You can keep all these imports as they are even when we add the federated learni from torch import Tensor from torchvision.datasets import CIFAR10 -As already mentioned we will use the CIFAR-10 dataset for this machine learning workload. The model architecture (a very simple Convolutional Neural Network) is defined in :code:`class Net()`. +As already mentioned we will use the CIFAR-10 dataset for this machine learning +workload. 
The model architecture (a very simple Convolutional Neural Network) is defined +in ``class Net()``. .. code-block:: python @@ -53,13 +59,17 @@ As already mentioned we will use the CIFAR-10 dataset for this machine learning x = self.fc3(x) return x -The :code:`load_data()` function loads the CIFAR-10 training and test sets. The :code:`transform` normalized the data after loading. +The ``load_data()`` function loads the CIFAR-10 training and test sets. The +``transform`` normalized the data after loading. .. code-block:: python DATA_ROOT = "~/data/cifar-10" - def load_data() -> Tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader, Dict]: + + def load_data() -> ( + Tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader, Dict] + ): """Load CIFAR-10 (training and test set).""" transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] @@ -68,12 +78,15 @@ The :code:`load_data()` function loads the CIFAR-10 training and test sets. The trainloader = torch.utils.data.DataLoader(trainset, batch_size=32, shuffle=True) testset = CIFAR10(DATA_ROOT, train=False, download=True, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False) - num_examples = {"trainset" : len(trainset), "testset" : len(testset)} + num_examples = {"trainset": len(trainset), "testset": len(testset)} return trainloader, testloader, num_examples -We now need to define the training (function :code:`train()`) which loops over the training set, measures the loss, backpropagates it, and then takes one optimizer step for each batch of training examples. +We now need to define the training (function ``train()``) which loops over the training +set, measures the loss, backpropagates it, and then takes one optimizer step for each +batch of training examples. -The evaluation of the model is defined in the function :code:`test()`. 
The function loops over all test samples and measures the loss of the model based on the test dataset. +The evaluation of the model is defined in the function ``test()``. The function loops +over all test samples and measures the loss of the model based on the test dataset. .. code-block:: python @@ -133,7 +146,8 @@ The evaluation of the model is defined in the function :code:`test()`. The funct accuracy = correct / total return loss, accuracy -Having defined the data loading, model architecture, training, and evaluation we can put everything together and train our CNN on CIFAR-10. +Having defined the data loading, model architecture, training, and evaluation we can put +everything together and train our CNN on CIFAR-10. .. code-block:: python @@ -143,7 +157,7 @@ Having defined the data loading, model architecture, training, and evaluation we print("Load data") trainloader, testloader, _ = load_data() print("Start training") - net=Net().to(DEVICE) + net = Net().to(DEVICE) train(net=net, trainloader=trainloader, epochs=2, device=DEVICE) print("Evaluate model") loss, accuracy = test(net=net, testloader=testloader, device=DEVICE) @@ -156,46 +170,57 @@ Having defined the data loading, model architecture, training, and evaluation we You can now run your machine learning workload: -.. code-block:: python +.. code-block:: bash python3 cifar.py -So far, this should all look fairly familiar if you've used PyTorch before. -Let's take the next step and use what we've built to create a simple federated learning system consisting of one server and two clients. +So far, this should all look fairly familiar if you've used PyTorch before. Let's take +the next step and use what we've built to create a simple federated learning system +consisting of one server and two clients. Federated Training ------------------ -The simple machine learning project discussed in the previous section trains the model on a single dataset (CIFAR-10), we call this centralized learning. 
-This concept of centralized learning, as shown in the previous section, is probably known to most of you, and many of you have used it previously. -Normally, if you'd want to run machine learning workloads in a federated fashion, then you'd have to change most of your code and set everything up from scratch. This can be a considerable effort. +The simple machine learning project discussed in the previous section trains the model +on a single dataset (CIFAR-10), we call this centralized learning. This concept of +centralized learning, as shown in the previous section, is probably known to most of +you, and many of you have used it previously. Normally, if you'd want to run machine +learning workloads in a federated fashion, then you'd have to change most of your code +and set everything up from scratch. This can be a considerable effort. -However, with Flower you can evolve your pre-existing code into a federated learning setup without the need for a major rewrite. +However, with Flower you can evolve your pre-existing code into a federated learning +setup without the need for a major rewrite. -The concept is easy to understand. -We have to start a *server* and then use the code in :code:`cifar.py` for the *clients* that are connected to the *server*. -The *server* sends model parameters to the clients. The *clients* run the training and update the parameters. -The updated parameters are sent back to the *server* which averages all received parameter updates. -This describes one round of the federated learning process and we repeat this for multiple rounds. +The concept is easy to understand. We have to start a *server* and then use the code in +``cifar.py`` for the *clients* that are connected to the *server*. The *server* sends +model parameters to the clients. The *clients* run the training and update the +parameters. The updated parameters are sent back to the *server* which averages all +received parameter updates. 
This describes one round of the federated learning process +and we repeat this for multiple rounds. -Our example consists of one *server* and two *clients*. Let's set up :code:`server.py` first. The *server* needs to import the Flower package :code:`flwr`. -Next, we use the :code:`start_server` function to start a server and tell it to perform three rounds of federated learning. +Our example consists of one *server* and two *clients*. Let's set up ``server.py`` +first. The *server* needs to import the Flower package ``flwr``. Next, we use the +``start_server`` function to start a server and tell it to perform three rounds of +federated learning. .. code-block:: python import flwr as fl if __name__ == "__main__": - fl.server.start_server(server_address="0.0.0.0:8080", config=fl.server.ServerConfig(num_rounds=3)) + fl.server.start_server( + server_address="0.0.0.0:8080", config=fl.server.ServerConfig(num_rounds=3) + ) We can already start the *server*: -.. code-block:: python +.. code-block:: bash python3 server.py -Finally, we will define our *client* logic in :code:`client.py` and build upon the previously defined centralized training in :code:`cifar.py`. -Our *client* needs to import :code:`flwr`, but also :code:`torch` to update the parameters on our PyTorch model: +Finally, we will define our *client* logic in ``client.py`` and build upon the +previously defined centralized training in ``cifar.py``. Our *client* needs to import +``flwr``, but also ``torch`` to update the parameters on our PyTorch model: .. code-block:: python @@ -210,28 +235,38 @@ Our *client* needs to import :code:`flwr`, but also :code:`torch` to update the DEVICE: str = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") -Implementing a Flower *client* basically means implementing a subclass of either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. -Our implementation will be based on :code:`flwr.client.NumPyClient` and we'll call it :code:`CifarClient`. 
-:code:`NumPyClient` is slightly easier to implement than :code:`Client` if you use a framework with good NumPy interoperability (like PyTorch or TensorFlow/Keras) because it avoids some of the boilerplate that would otherwise be necessary. -:code:`CifarClient` needs to implement four methods, two methods for getting/setting model parameters, one method for training the model, and one method for testing the model: - -#. :code:`set_parameters` - * set the model parameters on the local model that are received from the server - * loop over the list of model parameters received as NumPy :code:`ndarray`'s (think list of neural network layers) -#. :code:`get_parameters` - * get the model parameters and return them as a list of NumPy :code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects) -#. :code:`fit` - * update the parameters of the local model with the parameters received from the server - * train the model on the local training set - * get the updated local model weights and return them to the server -#. :code:`evaluate` - * update the parameters of the local model with the parameters received from the server - * evaluate the updated model on the local test set - * return the local loss and accuracy to the server - -The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make use of the functions :code:`train()` and :code:`test()` previously defined in :code:`cifar.py`. -So what we really do here is we tell Flower through our :code:`NumPyClient` subclass which of our already defined functions to call for training and evaluation. -We included type annotations to give you a better understanding of the data types that get passed around. +Implementing a Flower *client* basically means implementing a subclass of either +``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our implementation will be based +on ``flwr.client.NumPyClient`` and we'll call it ``CifarClient``. 
``NumPyClient`` is +slightly easier to implement than ``Client`` if you use a framework with good NumPy +interoperability (like PyTorch or TensorFlow/Keras) because it avoids some of the +boilerplate that would otherwise be necessary. ``CifarClient`` needs to implement four +methods, two methods for getting/setting model parameters, one method for training the +model, and one method for testing the model: + +1. ``set_parameters`` + - set the model parameters on the local model that are received from the server + - loop over the list of model parameters received as NumPy ``ndarray``'s (think + list of neural network layers) +2. ``get_parameters`` + - get the model parameters and return them as a list of NumPy ``ndarray``'s + (which is what ``flwr.client.NumPyClient`` expects) +3. ``fit`` + - update the parameters of the local model with the parameters received from the + server + - train the model on the local training set + - get the updated local model weights and return them to the server +4. ``evaluate`` + - update the parameters of the local model with the parameters received from the + server + - evaluate the updated model on the local test set + - return the local loss and accuracy to the server + +The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the functions +``train()`` and ``test()`` previously defined in ``cifar.py``. So what we really do here +is we tell Flower through our ``NumPyClient`` subclass which of our already defined +functions to call for training and evaluation. We included type annotations to give you +a better understanding of the data types that get passed around. .. 
code-block:: python @@ -277,8 +312,10 @@ We included type annotations to give you a better understanding of the data type loss, accuracy = cifar.test(self.model, self.testloader, device=DEVICE) return float(loss), self.num_examples["testset"], {"accuracy": float(accuracy)} -All that's left to do it to define a function that loads both model and data, creates a :code:`CifarClient`, and starts this client. -You load your data and model by using :code:`cifar.py`. Start :code:`CifarClient` with the function :code:`fl.client.start_client()` by pointing it at the same IP address we used in :code:`server.py`: +All that's left to do it to define a function that loads both model and data, creates a +``CifarClient``, and starts this client. You load your data and model by using +``cifar.py``. Start ``CifarClient`` with the function ``fl.client.start_client()`` by +pointing it at the same IP address we used in ``server.py``: .. code-block:: python @@ -300,15 +337,20 @@ You load your data and model by using :code:`cifar.py`. Start :code:`CifarClient And that's it. You can now open two additional terminal windows and run -.. code-block:: python +.. code-block:: bash python3 client.py -in each window (make sure that the server is running before you do so) and see your (previously centralized) PyTorch project run federated learning across two clients. Congratulations! +in each window (make sure that the server is running before you do so) and see your +(previously centralized) PyTorch project run federated learning across two clients. +Congratulations! Next Steps ---------- -The full source code for this example: `PyTorch: From Centralized To Federated (Code) `_. -Our example is, of course, somewhat over-simplified because both clients load the exact same dataset, which isn't realistic. -You're now prepared to explore this topic further. How about using different subsets of CIFAR-10 on each client? How about adding more clients? 
+The full source code for this example: `PyTorch: From Centralized To Federated (Code) +`_. +Our example is, of course, somewhat over-simplified because both clients load the exact +same dataset, which isn't realistic. You're now prepared to explore this topic further. +How about using different subsets of CIFAR-10 on each client? How about adding more +clients? diff --git a/doc/source/explanation-differential-privacy.rst b/doc/source/explanation-differential-privacy.rst index e488f5ccbd57..06e9dbdedd39 100644 --- a/doc/source/explanation-differential-privacy.rst +++ b/doc/source/explanation-differential-privacy.rst @@ -1,133 +1,171 @@ Differential Privacy ==================== -The information in datasets like healthcare, financial transactions, user preferences, etc., is valuable and has the potential for scientific breakthroughs and provides important business insights. -However, such data is also sensitive and there is a risk of compromising individual privacy. -Traditional methods like anonymization alone would not work because of attacks like Re-identification and Data Linkage. -That's where differential privacy comes in. It provides the possibility of analyzing data while ensuring the privacy of individuals. +The information in datasets like healthcare, financial transactions, user preferences, +etc., is valuable and has the potential for scientific breakthroughs and provides +important business insights. However, such data is also sensitive and there is a risk of +compromising individual privacy. +Traditional methods like anonymization alone would not work because of attacks like +Re-identification and Data Linkage. That's where differential privacy comes in. It +provides the possibility of analyzing data while ensuring the privacy of individuals. Differential Privacy -------------------- -Imagine two datasets that are identical except for a single record (for instance, Alice's data). 
-Differential Privacy (DP) guarantees that any analysis (M), like calculating the average income, will produce nearly identical results for both datasets (O and O' would be similar). -This preserves group patterns while obscuring individual details, ensuring the individual's information remains hidden in the crowd. -.. image:: ./_static/DP/dp-intro.png - :align: center - :width: 400 - :alt: DP Intro +Imagine two datasets that are identical except for a single record (for instance, +Alice's data). Differential Privacy (DP) guarantees that any analysis (M), like +calculating the average income, will produce nearly identical results for both datasets +(O and O' would be similar). This preserves group patterns while obscuring individual +details, ensuring the individual's information remains hidden in the crowd. +.. image:: ./_static/DP/dp-intro.png + :align: center + :width: 400 + :alt: DP Intro -One of the most commonly used mechanisms to achieve DP is adding enough noise to the output of the analysis to mask the contribution of each individual in the data while preserving the overall accuracy of the analysis. +One of the most commonly used mechanisms to achieve DP is adding enough noise to the +output of the analysis to mask the contribution of each individual in the data while +preserving the overall accuracy of the analysis. Formal Definition ~~~~~~~~~~~~~~~~~ -Differential Privacy (DP) provides statistical guarantees against the information an adversary can infer through the output of a randomized algorithm. -It provides an unconditional upper bound on the influence of a single individual on the output of the algorithm by adding noise [1]. -A randomized mechanism -M provides (:math:`\epsilon`, :math:`\delta`)-differential privacy if for any two neighboring databases, D :sub:`1` and D :sub:`2`, that differ in only a single record, -and for all possible outputs S ⊆ Range(A): - -.. 
math:: - \small - P[M(D_{1} \in A)] \leq e^{\epsilon} P[M(D_{2} \in A)] + \delta +Differential Privacy (DP) provides statistical guarantees against the information an +adversary can infer through the output of a randomized algorithm. It provides an +unconditional upper bound on the influence of a single individual on the output of the +algorithm by adding noise [1]. A randomized mechanism M provides (:math:`\epsilon`, +:math:`\delta`)-differential privacy if for any two neighboring databases, D :sub:`1` +and D :sub:`2`, that differ in only a single record, and for all possible outputs S ⊆ +Range(A): +.. math:: -The :math:`\epsilon` parameter, also known as the privacy budget, is a metric of privacy loss. -It also controls the privacy-utility trade-off; lower :math:`\epsilon` values indicate higher levels of privacy but are likely to reduce utility as well. -The :math:`\delta` parameter accounts for a small probability on which the upper bound :math:`\epsilon` does not hold. -The amount of noise needed to achieve differential privacy is proportional to the sensitivity of the output, which measures the maximum change in the output due to the inclusion or removal of a single record. + \small + P[M(D_{1} \in A)] \leq e^{\epsilon} P[M(D_{2} \in A)] + \delta +The :math:`\epsilon` parameter, also known as the privacy budget, is a metric of privacy +loss. It also controls the privacy-utility trade-off; lower :math:`\epsilon` values +indicate higher levels of privacy but are likely to reduce utility as well. The +:math:`\delta` parameter accounts for a small probability on which the upper bound +:math:`\epsilon` does not hold. The amount of noise needed to achieve differential +privacy is proportional to the sensitivity of the output, which measures the maximum +change in the output due to the inclusion or removal of a single record. 
Differential Privacy in Machine Learning ---------------------------------------- + DP can be utilized in machine learning to preserve the privacy of the training data. -Differentially private machine learning algorithms are designed in a way to prevent the algorithm to learn any specific information about any individual data points and subsequently prevent the model from revealing sensitive information. -Depending on the stage at which noise is introduced, various methods exist for applying DP to machine learning algorithms. -One approach involves adding noise to the training data (either to the features or labels), while another method entails injecting noise into the gradients of the loss function during model training. -Additionally, such noise can be incorporated into the model's output. +Differentially private machine learning algorithms are designed in a way to prevent the +algorithm to learn any specific information about any individual data points and +subsequently prevent the model from revealing sensitive information. Depending on the +stage at which noise is introduced, various methods exist for applying DP to machine +learning algorithms. One approach involves adding noise to the training data (either to +the features or labels), while another method entails injecting noise into the gradients +of the loss function during model training. Additionally, such noise can be incorporated +into the model's output. Differential Privacy in Federated Learning ------------------------------------------ -Federated learning is a data minimization approach that allows multiple parties to collaboratively train a model without sharing their raw data. -However, federated learning also introduces new privacy challenges. The model updates between parties and the central server can leak information about the local data. -These leaks can be exploited by attacks such as membership inference and property inference attacks, or model inversion attacks. 
-DP can play a crucial role in federated learning to provide privacy for the clients' data. +Federated learning is a data minimization approach that allows multiple parties to +collaboratively train a model without sharing their raw data. However, federated +learning also introduces new privacy challenges. The model updates between parties and +the central server can leak information about the local data. These leaks can be +exploited by attacks such as membership inference and property inference attacks, or +model inversion attacks. -Depending on the granularity of privacy provision or the location of noise addition, different forms of DP exist in federated learning. -In this explainer, we focus on two approaches of DP utilization in federated learning based on where the noise is added: at the server (also known as the center) or at the client (also known as the local). +DP can play a crucial role in federated learning to provide privacy for the clients' +data. -- **Central Differential Privacy**: DP is applied by the server and the goal is to prevent the aggregated model from leaking information about each client's data. +Depending on the granularity of privacy provision or the location of noise addition, +different forms of DP exist in federated learning. In this explainer, we focus on two +approaches of DP utilization in federated learning based on where the noise is added: at +the server (also known as the center) or at the client (also known as the local). -- **Local Differential Privacy**: DP is applied on the client side before sending any information to the server and the goal is to prevent the updates that are sent to the server from leaking any information about the client's data. +- **Central Differential Privacy**: DP is applied by the server and the goal is to + prevent the aggregated model from leaking information about each client's data. 
+- **Local Differential Privacy**: DP is applied on the client side before sending any + information to the server and the goal is to prevent the updates that are sent to the + server from leaking any information about the client's data. Central Differential Privacy ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In this approach, which is also known as user-level DP, the central server is responsible for adding noise to the globally aggregated parameters. It should be noted that trust in the server is required. + +In this approach, which is also known as user-level DP, the central server is +responsible for adding noise to the globally aggregated parameters. It should be noted +that trust in the server is required. .. image:: ./_static/DP/CDP.png - :align: center - :width: 400 - :alt: Central Differential Privacy - -While there are various ways to implement central DP in federated learning, we concentrate on the algorithms proposed by [2] and [3]. -The overall approach is to clip the model updates sent by the clients and add some amount of noise to the aggregated model. -In each iteration, a random set of clients is chosen with a specific probability for training. -Each client performs local training on its own data. -The update of each client is then clipped by some value `S` (sensitivity `S`). -This would limit the impact of any individual client which is crucial for privacy and often beneficial for robustness. -A common approach to achieve this is by restricting the `L2` norm of the clients' model updates, ensuring that larger updates are scaled down to fit within the norm `S`. + :align: center + :width: 400 + :alt: Central Differential Privacy + +While there are various ways to implement central DP in federated learning, we +concentrate on the algorithms proposed by [2] and [3]. The overall approach is to clip +the model updates sent by the clients and add some amount of noise to the aggregated +model. 
In each iteration, a random set of clients is chosen with a specific probability +for training. Each client performs local training on its own data. The update of each +client is then clipped by some value `S` (sensitivity `S`). This would limit the impact +of any individual client which is crucial for privacy and often beneficial for +robustness. A common approach to achieve this is by restricting the `L2` norm of the +clients' model updates, ensuring that larger updates are scaled down to fit within the +norm `S`. .. image:: ./_static/DP/clipping.png - :align: center - :width: 300 - :alt: clipping + :align: center + :width: 300 + :alt: clipping -Afterwards, the Gaussian mechanism is used to add noise in order to distort the sum of all clients' updates. -The amount of noise is scaled to the sensitivity value to obtain a privacy guarantee. -The Gaussian mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( noise_scale * S ) / (number of sampled clients)`. +Afterwards, the Gaussian mechanism is used to add noise in order to distort the sum of +all clients' updates. The amount of noise is scaled to the sensitivity value to obtain a +privacy guarantee. The Gaussian mechanism is used with a noise sampled from `N (0, σ²)` +where `σ = ( noise_scale * S ) / (number of sampled clients)`. Clipping -^^^^^^^^ - -There are two forms of clipping commonly used in Central DP: Fixed Clipping and Adaptive Clipping. +++++++++ -- **Fixed Clipping** : A predefined fix threshold is set for the magnitude of clients' updates. Any update exceeding this threshold is clipped back to the threshold value. +There are two forms of clipping commonly used in Central DP: Fixed Clipping and Adaptive +Clipping. -- **Adaptive Clipping** : The clipping threshold dynamically adjusts based on the observed update distribution [4]. It means that the clipping value is tuned during the rounds with respect to the quantile of the update norm distribution. 
+- **Fixed Clipping** : A predefined fixed threshold is set for the magnitude of clients' + updates. Any update exceeding this threshold is clipped back to the threshold value. +- **Adaptive Clipping** : The clipping threshold dynamically adjusts based on the + observed update distribution [4]. It means that the clipping value is tuned during the + rounds with respect to the quantile of the update norm distribution. -The choice between fixed and adaptive clipping depends on various factors such as privacy requirements, data distribution, model complexity, and others. +The choice between fixed and adaptive clipping depends on various factors such as +privacy requirements, data distribution, model complexity, and others. Local Differential Privacy ~~~~~~~~~~~~~~~~~~~~~~~~~~ -In this approach, each client is responsible for performing DP. -Local DP avoids the need for a fully trusted aggregator, but it should be noted that local DP leads to a decrease in accuracy but better privacy in comparison to central DP. +In this approach, each client is responsible for performing DP. Local DP avoids the need +for a fully trusted aggregator, but it should be noted that local DP leads to a decrease +in accuracy but better privacy in comparison to central DP. .. image:: ./_static/DP/LDP.png - :align: center - :width: 400 - :alt: Local Differential Privacy - + :align: center + :width: 400 + :alt: Local Differential Privacy In this explainer, we focus on two forms of achieving Local DP: -- Each client adds noise to the local updates before sending them to the server. To achieve (:math:`\epsilon`, :math:`\delta`)-DP, considering the sensitivity of the local model to be ∆, Gaussian noise is applied with a noise scale of σ where: +- Each client adds noise to the local updates before sending them to the server. To + achieve (:math:`\epsilon`, :math:`\delta`)-DP, considering the sensitivity of the + local model to be ∆, Gaussian noise is applied with a noise scale of σ where: ..
math:: + \small \frac{∆ \times \sqrt{2 \times \log\left(\frac{1.25}{\delta}\right)}}{\epsilon} - -- Each client adds noise to the gradients of the model during the local training (DP-SGD). More specifically, in this approach, gradients are clipped and an amount of calibrated noise is injected into the gradients. - +- Each client adds noise to the gradients of the model during the local training + (DP-SGD). More specifically, in this approach, gradients are clipped and an amount of + calibrated noise is injected into the gradients. Please note that these two approaches are providing privacy at different levels. - **References:** [1] Dwork et al. The Algorithmic Foundations of Differential Privacy. diff --git a/doc/source/explanation-federated-evaluation.rst b/doc/source/explanation-federated-evaluation.rst index bcdca9bae700..c56a5d48b2f6 100644 --- a/doc/source/explanation-federated-evaluation.rst +++ b/doc/source/explanation-federated-evaluation.rst @@ -1,8 +1,8 @@ Federated evaluation ==================== -There are two main approaches to evaluating models in federated learning systems: centralized (or server-side) evaluation and federated (or client-side) evaluation. - +There are two main approaches to evaluating models in federated learning systems: +centralized (or server-side) evaluation and federated (or client-side) evaluation. Centralized Evaluation ---------------------- @@ -10,15 +10,17 @@ Centralized Evaluation Built-In Strategies ~~~~~~~~~~~~~~~~~~~ -All built-in strategies support centralized evaluation by providing an evaluation function during initialization. -An evaluation function is any function that can take the current global model parameters as input and return evaluation results: +All built-in strategies support centralized evaluation by providing an evaluation +function during initialization. An evaluation function is any function that can take the +current global model parameters as input and return evaluation results: .. 
code-block:: python - + from flwr.common import NDArrays, Scalar - + from typing import Dict, Optional, Tuple + def get_evaluate_fn(model): """Return an evaluation function for server-side evaluation.""" @@ -38,6 +40,7 @@ An evaluation function is any function that can take the current global model pa return evaluate + # Load and compile model for server-side parameter evaluation model = tf.keras.applications.EfficientNetB0( input_shape=(32, 32, 3), weights=None, classes=10 @@ -47,7 +50,7 @@ An evaluation function is any function that can take the current global model pa # Create strategy strategy = fl.server.strategy.FedAvg( - # ... other FedAvg arguments + # ... other FedAvg arguments evaluate_fn=get_evaluate_fn(model), ) @@ -57,9 +60,10 @@ An evaluation function is any function that can take the current global model pa Custom Strategies ~~~~~~~~~~~~~~~~~ -The :code:`Strategy` abstraction provides a method called :code:`evaluate` that can directly be used to evaluate the current global model parameters. -The current server implementation calls :code:`evaluate` after parameter aggregation and before federated evaluation (see next paragraph). - +The ``Strategy`` abstraction provides a method called ``evaluate`` that can directly be +used to evaluate the current global model parameters. The current server implementation +calls ``evaluate`` after parameter aggregation and before federated evaluation (see next +paragraph). Federated Evaluation -------------------- @@ -67,7 +71,8 @@ Federated Evaluation Implementing Federated Evaluation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Client-side evaluation happens in the :code:`Client.evaluate` method and can be configured from the server side. +Client-side evaluation happens in the ``Client.evaluate`` method and can be configured +from the server side. .. code-block:: python @@ -79,9 +84,11 @@ Client-side evaluation happens in the :code:`Client.evaluate` method and can be def get_parameters(self, config): # ... 
+ pass def fit(self, parameters, config): # ... + pass def evaluate(self, parameters, config): """Evaluate parameters on the locally held test set.""" @@ -100,12 +107,27 @@ Client-side evaluation happens in the :code:`Client.evaluate` method and can be Configuring Federated Evaluation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Federated evaluation can be configured from the server side. Built-in strategies support the following arguments: - -- :code:`fraction_evaluate`: a :code:`float` defining the fraction of clients that will be selected for evaluation. If :code:`fraction_evaluate` is set to :code:`0.1` and :code:`100` clients are connected to the server, then :code:`10` will be randomly selected for evaluation. If :code:`fraction_evaluate` is set to :code:`0.0`, federated evaluation will be disabled. -- :code:`min_evaluate_clients`: an :code:`int`: the minimum number of clients to be selected for evaluation. If :code:`fraction_evaluate` is set to :code:`0.1`, :code:`min_evaluate_clients` is set to 20, and :code:`100` clients are connected to the server, then :code:`20` clients will be selected for evaluation. -- :code:`min_available_clients`: an :code:`int` that defines the minimum number of clients which need to be connected to the server before a round of federated evaluation can start. If fewer than :code:`min_available_clients` are connected to the server, the server will wait until more clients are connected before it continues to sample clients for evaluation. -- :code:`on_evaluate_config_fn`: a function that returns a configuration dictionary which will be sent to the selected clients. The function will be called during each round and provides a convenient way to customize client-side evaluation from the server side, for example, to configure the number of validation steps performed. +Federated evaluation can be configured from the server side. 
Built-in strategies support +the following arguments: + +- ``fraction_evaluate``: a ``float`` defining the fraction of clients that will be + selected for evaluation. If ``fraction_evaluate`` is set to ``0.1`` and ``100`` + clients are connected to the server, then ``10`` will be randomly selected for + evaluation. If ``fraction_evaluate`` is set to ``0.0``, federated evaluation will be + disabled. +- ``min_evaluate_clients``: an ``int``: the minimum number of clients to be selected for + evaluation. If ``fraction_evaluate`` is set to ``0.1``, ``min_evaluate_clients`` is + set to 20, and ``100`` clients are connected to the server, then ``20`` clients will + be selected for evaluation. +- ``min_available_clients``: an ``int`` that defines the minimum number of clients which + need to be connected to the server before a round of federated evaluation can start. + If fewer than ``min_available_clients`` are connected to the server, the server will + wait until more clients are connected before it continues to sample clients for + evaluation. +- ``on_evaluate_config_fn``: a function that returns a configuration dictionary which + will be sent to the selected clients. The function will be called during each round + and provides a convenient way to customize client-side evaluation from the server + side, for example, to configure the number of validation steps performed. .. code-block:: python @@ -118,6 +140,7 @@ Federated evaluation can be configured from the server side. Built-in strategies val_steps = 5 if server_round < 4 else 10 return {"val_steps": val_steps} + # Create strategy strategy = fl.server.strategy.FedAvg( # ... other FedAvg arguments @@ -130,11 +153,11 @@ Federated evaluation can be configured from the server side. 
Built-in strategies # Start Flower server for four rounds of federated learning fl.server.start_server(server_address="[::]:8080", strategy=strategy) - Evaluating Local Model Updates During Training ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Model parameters can also be evaluated during training. :code:`Client.fit` can return arbitrary evaluation results as a dictionary: +Model parameters can also be evaluated during training. ``Client.fit`` can return +arbitrary evaluation results as a dictionary: .. code-block:: python @@ -146,6 +169,7 @@ Model parameters can also be evaluated during training. :code:`Client.fit` can r def get_parameters(self, config): # ... + pass def fit(self, parameters, config): """Train parameters on the locally held training set.""" @@ -171,9 +195,12 @@ Model parameters can also be evaluated during training. :code:`Client.fit` can r def evaluate(self, parameters, config): # ... - + pass Full Code Example ----------------- -For a full code example that uses both centralized and federated evaluation, see the *Advanced TensorFlow Example* (the same approach can be applied to workloads implemented in any other framework): https://github.com/adap/flower/tree/main/examples/advanced-tensorflow +For a full code example that uses both centralized and federated evaluation, see the +*Advanced TensorFlow Example* (the same approach can be applied to workloads implemented +in any other framework): +https://github.com/adap/flower/tree/main/examples/advanced-tensorflow diff --git a/doc/source/explanation-flower-architecture.rst b/doc/source/explanation-flower-architecture.rst index 22691d6091ac..e82da56dcefa 100644 --- a/doc/source/explanation-flower-architecture.rst +++ b/doc/source/explanation-flower-architecture.rst @@ -1,180 +1,158 @@ -##################### - Flower Architecture -##################### +Flower Architecture +=================== -This page explains the architecture of deployed Flower federated -learning system. 
+This page explains the architecture of a deployed Flower federated learning system. -In federated learning (FL), there is typically one server and a number -of clients that are connected to the server. This is often called a -federation. +In federated learning (FL), there is typically one server and a number of clients that +are connected to the server. This is often called a federation. -The role of the server is to coordinate the training process. The role -of each client is to receive tasks from the server, execute those tasks -and return the results back to the server. +The role of the server is to coordinate the training process. The role of each client is +to receive tasks from the server, execute those tasks and return the results back to the +server. This is sometimes called a hub-and-spoke topology: .. figure:: ./_static/flower-architecture-hub-and-spoke.svg - :align: center - :width: 600 - :alt: Hub-and-spoke topology in federated learning - :class: no-scaled-link + :align: center + :width: 600 + :alt: Hub-and-spoke topology in federated learning + :class: no-scaled-link - Hub-and-spoke topology in federated learning (one server, multiple clients). + Hub-and-spoke topology in federated learning (one server, multiple clients). -In a real-world deployment, we typically want to run different projects -on such a federation. Each project could use different hyperparameters, -different model architectures, different aggregation strategies, or even -different machine learning frameworks like PyTorch and TensorFlow. +In a real-world deployment, we typically want to run different projects on such a +federation. Each project could use different hyperparameters, different model +architectures, different aggregation strategies, or even different machine learning +frameworks like PyTorch and TensorFlow. -This is why, in Flower, both the server side and the client side are -split into two parts.
One part is long-lived and responsible for -communicating across the network, the other part is short-lived and -executes task-specific code. +This is why, in Flower, both the server side and the client side are split into two +parts. One part is long-lived and responsible for communicating across the network, the +other part is short-lived and executes task-specific code. A Flower `server` consists of **SuperLink** and ``ServerApp``: -- **SuperLink**: a long-running process that forwards task instructions - to clients (SuperNodes) and receives task results back. - -- ``ServerApp``: a short-lived process with project-spcific code that - customizes all server-side aspects of federated learning systems - (client selection, client configuration, result aggregation). This is - what AI researchers and AI engineers write when they build Flower - apps. +- **SuperLink**: a long-running process that forwards task instructions to clients + (SuperNodes) and receives task results back. +- ``ServerApp``: a short-lived process with project-specific code that customizes all + server-side aspects of federated learning systems (client selection, client + configuration, result aggregation). This is what AI researchers and AI engineers write + when they build Flower apps. A Flower `client` consists of **SuperNode** and ``ClientApp``: -- **SuperNode**: a long-running process that connects to the SuperLink, - asks for tasks, executes tasks (for example, "train this model on - your local data") and returns task results back to the SuperLink. - -- ``ClientApp``: a short-lived process with project-specific code that - customizes all client-side aspects of federated learning systems - (local model training and evaluation, pre- and post-processing). This - is what AI researchers and AI engineers write when they build Flower - apps.
+- **SuperNode**: a long-running process that connects to the SuperLink, asks for tasks, + executes tasks (for example, "train this model on your local data") and returns task + results back to the SuperLink. +- ``ClientApp``: a short-lived process with project-specific code that customizes all + client-side aspects of federated learning systems (local model training and + evaluation, pre- and post-processing). This is what AI researchers and AI engineers + write when they build Flower apps. -Why SuperNode and SuperLink? Well, in federated learning, the clients -are the actual stars of the show. They hold the training data and they -run the actual training. This is why Flower decided to name them -**SuperNode**. The **SuperLink** is then responsible for acting as the -`missing link` between all those SuperNodes. +Why SuperNode and SuperLink? Well, in federated learning, the clients are the actual +stars of the show. They hold the training data and they run the actual training. This is +why Flower decided to name them **SuperNode**. The **SuperLink** is then responsible for +acting as the `missing link` between all those SuperNodes. .. figure:: ./_static/flower-architecture-basic-architecture.svg - :align: center - :width: 600 - :alt: Basic Flower architecture - :class: no-scaled-link + :align: center + :width: 600 + :alt: Basic Flower architecture + :class: no-scaled-link - The basic Flower architecture for federated learning. + The basic Flower architecture for federated learning. -In a Flower app project, users will typically develop the ``ServerApp`` -and the ``ClientApp``. All the network communication between `server` -and `clients` is taken care of by the SuperLink and SuperNodes. +In a Flower app project, users will typically develop the ``ServerApp`` and the +``ClientApp``. All the network communication between `server` and `clients` is taken +care of by the SuperLink and SuperNodes. .. 
tip:: - For more details, please refer to the |serverapp_link|_ and - |clientapp_link|_ documentation. + For more details, please refer to the |serverapp_link|_ and |clientapp_link|_ + documentation. -With *multi-run*, multiple ``ServerApp``\s and ``ClientApp``\s are now -capable of running on the same federation consisting of a single -long-running SuperLink and multiple long-running SuperNodes. This is -sometimes referred to as `multi-tenancy` or `multi-job`. +With *multi-run*, multiple ``ServerApp``\s and ``ClientApp``\s are now capable of +running on the same federation consisting of a single long-running SuperLink and +multiple long-running SuperNodes. This is sometimes referred to as `multi-tenancy` or +`multi-job`. -As shown in the figure below, two projects, each consisting of a -``ServerApp`` and a ``ClientApp``, could share the same SuperLink and -SuperNodes. +As shown in the figure below, two projects, each consisting of a ``ServerApp`` and a +``ClientApp``, could share the same SuperLink and SuperNodes. .. figure:: ./_static/flower-architecture-multi-run.svg - :align: center - :width: 600 - :alt: Multi-tenancy federated learning architecture - :class: no-scaled-link + :align: center + :width: 600 + :alt: Multi-tenancy federated learning architecture + :class: no-scaled-link - Multi-tenancy federated learning architecture with Flower + Multi-tenancy federated learning architecture with Flower -To illustrate how multi-run works, consider one federated learning -training run where a ``ServerApp`` and a ``ClientApp`` are participating -in ``[run 1]``. Note that a SuperNode will only run a ``ClientApp`` if -it is selected to participate in the training run. +To illustrate how multi-run works, consider one federated learning training run where a +``ServerApp`` and a ``ClientApp`` are participating in ``[run 1]``. Note that a +SuperNode will only run a ``ClientApp`` if it is selected to participate in the training +run. 
-In ``[run 1]`` below, all the SuperNodes are selected and therefore run -their corresponding ``ClientApp``\s: +In ``[run 1]`` below, all the SuperNodes are selected and therefore run their +corresponding ``ClientApp``\s: .. figure:: ./_static/flower-architecture-multi-run-1.svg - :align: center - :width: 600 - :alt: Multi-tenancy federated learning architecture - Run 1 - :class: no-scaled-link + :align: center + :width: 600 + :alt: Multi-tenancy federated learning architecture - Run 1 + :class: no-scaled-link - Run 1 in a multi-run federated learning architecture with Flower. - All SuperNodes participate in the training round. + Run 1 in a multi-run federated learning architecture with Flower. All SuperNodes + participate in the training round. -However, in ``[run 2]``, only the first and third SuperNodes are -selected to participate in the training: +However, in ``[run 2]``, only the first and third SuperNodes are selected to participate +in the training: .. figure:: ./_static/flower-architecture-multi-run-2.svg - :align: center - :width: 600 - :alt: Multi-tenancy federated learning architecture - Run 2 - :class: no-scaled-link - - Run 2 in a multi-run federated learning architecture with Flower. - Only the first and third SuperNodes are selected to participate in the - training round. - -Therefore, with Flower multi-run, different projects (each consisting of -a ``ServerApp`` and ``ClientApp``) can run on different sets of clients. - -To help you start and manage all of the concurrently executing training -runs, Flower offers one additional long-running server-side service -called **SuperExec**. When you type ``flwr run`` to start a new training -run, the ``flwr`` CLI bundles your local project (mainly your -``ServerApp`` and ``ClientApp``) and sends it to the **SuperExec**. The -**SuperExec** will then take care of starting and managing your -``ServerApp``, which in turn selects SuperNodes to execute your -``ClientApp``. 
- -This architecture allows many users to (concurrently) run their projects -on the same federation, simply by typing ``flwr run`` on their local -developer machine. + :align: center + :width: 600 + :alt: Multi-tenancy federated learning architecture - Run 2 + :class: no-scaled-link + + Run 2 in a multi-run federated learning architecture with Flower. Only the first and + third SuperNodes are selected to participate in the training round. + +Therefore, with Flower multi-run, different projects (each consisting of a ``ServerApp`` +and ``ClientApp``) can run on different sets of clients. + +To help you start and manage all of the concurrently executing training runs, Flower +offers one additional long-running server-side service called **SuperExec**. When you +type ``flwr run`` to start a new training run, the ``flwr`` CLI bundles your local +project (mainly your ``ServerApp`` and ``ClientApp``) and sends it to the **SuperExec**. +The **SuperExec** will then take care of starting and managing your ``ServerApp``, which +in turn selects SuperNodes to execute your ``ClientApp``. + +This architecture allows many users to (concurrently) run their projects on the same +federation, simply by typing ``flwr run`` on their local developer machine. .. figure:: ./_static/flower-architecture-deployment-engine.svg - :align: center - :width: 800 - :alt: Flower Deployment Engine with SuperExec - :class: no-scaled-link + :align: center + :width: 800 + :alt: Flower Deployment Engine with SuperExec + :class: no-scaled-link - The SuperExec service for managing concurrent training runs in - Flower. + The SuperExec service for managing concurrent training runs in Flower. .. note:: - This explanation covers the Flower Deployment Engine. An explanation - covering the Flower Simulation Engine will follow. + This explanation covers the Flower Deployment Engine. An explanation covering the + Flower Simulation Engine will follow. .. 
important:: - As we continue to enhance Flower at a rapid pace, we'll periodically - update this explainer document. Feel free to share any feedback with - us. - -.. |clientapp_link| replace:: + As we continue to enhance Flower at a rapid pace, we'll periodically update this + explainer document. Feel free to share any feedback with us. - ``ClientApp`` +.. |clientapp_link| replace:: ``ClientApp`` -.. |serverapp_link| replace:: - - ``ServerApp`` +.. |serverapp_link| replace:: ``ServerApp`` .. _clientapp_link: ref-api/flwr.client.ClientApp.html .. _serverapp_link: ref-api/flwr.server.ServerApp.html -.. title:: Flower federated learning architecture - .. meta:: - :description: Explore the federated learning architecture of the Flower framework, featuring multi-run, concurrent execution, and scalable, secure machine learning while preserving data privacy. + :description: Explore the federated learning architecture of the Flower framework, featuring multi-run, concurrent execution, and scalable, secure machine learning while preserving data privacy. diff --git a/doc/source/how-to-aggregate-evaluation-results.rst b/doc/source/how-to-aggregate-evaluation-results.rst index fa4ba88b8ff0..be6e20068c88 100644 --- a/doc/source/how-to-aggregate-evaluation-results.rst +++ b/doc/source/how-to-aggregate-evaluation-results.rst @@ -1,14 +1,15 @@ Aggregate evaluation results ============================ -The Flower server does not prescribe a way to aggregate evaluation results, but it enables the user to fully customize result aggregation. - +The Flower server does not prescribe a way to aggregate evaluation results, but it +enables the user to fully customize result aggregation. Aggregate Custom Evaluation Results ----------------------------------- -The same :code:`Strategy`-customization approach can be used to aggregate custom evaluation results coming from individual clients. 
-Clients can return custom metrics to the server by returning a dictionary: +The same ``Strategy``-customization approach can be used to aggregate custom evaluation +results coming from individual clients. Clients can return custom metrics to the server +by returning a dictionary: .. code-block:: python @@ -16,9 +17,11 @@ Clients can return custom metrics to the server by returning a dictionary: def get_parameters(self, config): # ... + pass def fit(self, parameters, config): # ... + pass def evaluate(self, parameters, config): """Evaluate parameters on the locally held test set.""" @@ -33,7 +36,8 @@ Clients can return custom metrics to the server by returning a dictionary: num_examples_test = len(self.x_test) return loss, num_examples_test, {"accuracy": accuracy} -The server can then use a customized strategy to aggregate the metrics provided in these dictionaries: +The server can then use a customized strategy to aggregate the metrics provided in these +dictionaries: .. code-block:: python @@ -50,7 +54,9 @@ The server can then use a customized strategy to aggregate the metrics provided return None, {} # Call aggregate_evaluate from base class (FedAvg) to aggregate loss and metrics - aggregated_loss, aggregated_metrics = super().aggregate_evaluate(server_round, results, failures) + aggregated_loss, aggregated_metrics = super().aggregate_evaluate( + server_round, results, failures + ) # Weigh accuracy of each client by number of examples used accuracies = [r.metrics["accuracy"] * r.num_examples for _, r in results] @@ -58,11 +64,14 @@ The server can then use a customized strategy to aggregate the metrics provided # Aggregate and print custom metric aggregated_accuracy = sum(accuracies) / sum(examples) - print(f"Round {server_round} accuracy aggregated from client results: {aggregated_accuracy}") + print( + f"Round {server_round} accuracy aggregated from client results: {aggregated_accuracy}" + ) # Return aggregated loss and metrics (i.e., aggregated accuracy) 
return aggregated_loss, {"accuracy": aggregated_accuracy} + # Create strategy and run server strategy = AggregateCustomMetricStrategy( # (same arguments as FedAvg here) diff --git a/doc/source/how-to-authenticate-supernodes.rst b/doc/source/how-to-authenticate-supernodes.rst index 9b001531ee33..a2dd499dbc10 100644 --- a/doc/source/how-to-authenticate-supernodes.rst +++ b/doc/source/how-to-authenticate-supernodes.rst @@ -1,29 +1,38 @@ Authenticate SuperNodes ======================= -Flower has built-in support for authenticated SuperNodes that you can use to verify the identities of each SuperNode connecting to a SuperLink. -Flower node authentication works similar to how GitHub SSH authentication works: +Flower has built-in support for authenticated SuperNodes that you can use to verify the +identities of each SuperNode connecting to a SuperLink. Flower node authentication works +similar to how GitHub SSH authentication works: -* SuperLink (server) stores a list of known (client) node public keys -* Using ECDH, both SuperNode and SuperLink independently derive a shared secret -* Shared secret is used to compute the HMAC value of the message sent from SuperNode to SuperLink as a token -* SuperLink verifies the token +- SuperLink (server) stores a list of known (client) node public keys +- Using ECDH, both SuperNode and SuperLink independently derive a shared secret +- Shared secret is used to compute the HMAC value of the message sent from SuperNode to + SuperLink as a token +- SuperLink verifies the token -We recommend you to check out the complete `code example `_ demonstrating federated learning with Flower in an authenticated setting. +We recommend you to check out the complete `code example +`_ +demonstrating federated learning with Flower in an authenticated setting. .. note:: + This guide covers a preview feature that might change in future versions of Flower. .. 
note:: - For increased security, node authentication can only be used when encrypted connections (SSL/TLS) are enabled. -Enable node authentication in :code:`SuperLink` ----------------------------------------------- + For increased security, node authentication can only be used when encrypted + connections (SSL/TLS) are enabled. + +Enable node authentication in ``SuperLink`` +------------------------------------------- -To enable node authentication, first you need to configure SSL/TLS connections to secure the SuperLink<>SuperNode communication. You can find the complete guide -`here `_. -After configuring secure connections, you can enable client authentication in a long-running Flower :code:`SuperLink`. -Use the following terminal command to start a Flower :code:`SuperNode` that has both secure connections and node authentication enabled: +To enable node authentication, first you need to configure SSL/TLS connections to secure +the SuperLink<>SuperNode communication. You can find the complete guide `here +`_. After +configuring secure connections, you can enable client authentication in a long-running +Flower ``SuperLink``. Use the following terminal command to start a Flower ``SuperLink`` +that has both secure connections and node authentication enabled:
- A valid CSV file storing known node public keys should list the keys in OpenSSH format, separated by commas and without any comments. For an example, refer to our code sample, which contains a CSV file with two known node public keys. + A valid CSV file storing known node public keys should list the keys in OpenSSH + format, separated by commas and without any comments. For an example, refer to + our code sample, which contains a CSV file with two known node public keys. -2. The second and third flags :code:`--auth-superlink-private-key` and :code:`--auth-superlink-public-key` expect paths to the server's private and public keys. For development purposes, you can generate a private and public key pair using :code:`ssh-keygen -t ecdsa -b 384`. +2. The second and third flags ``--auth-superlink-private-key`` and + ``--auth-superlink-public-key`` expect paths to the server's private and public keys. + For development purposes, you can generate a private and public key pair using + ``ssh-keygen -t ecdsa -b 384``. .. note:: - In Flower 1.9, there is no support for dynamically removing, editing, or adding known node public keys to the SuperLink. - To change the set of known nodes, you need to shut the server down, edit the CSV file, and start the server again. - Support for dynamically changing the set of known nodes is on the roadmap to be released in Flower 1.10 (ETA: June). + In Flower 1.9, there is no support for dynamically removing, editing, or adding + known node public keys to the SuperLink. To change the set of known nodes, you need + to shut the server down, edit the CSV file, and start the server again. Support for + dynamically changing the set of known nodes is on the roadmap to be released in + Flower 1.10 (ETA: June). 
-Enable node authentication in :code:`SuperNode` -------------------------------------------------- +Enable node authentication in ``SuperNode`` +------------------------------------------- -Similar to the long-running Flower server (:code:`SuperLink`), you can easily enable node authentication in the long-running Flower client (:code:`SuperNode`). -Use the following terminal command to start an authenticated :code:`SuperNode`: +Similar to the long-running Flower server (``SuperLink``), you can easily enable node +authentication in the long-running Flower client (``SuperNode``). Use the following +terminal command to start an authenticated ``SuperNode``: .. code-block:: bash - flower-supernode - --root-certificates certificates/ca.crt - --superlink 127.0.0.1:9092 - --auth-supernode-private-key keys/client_credentials - --auth-supernode-public-key keys/client_credentials.pub - -The :code:`--auth-supernode-private-key` flag expects a path to the node's private key file and the :code:`--auth-supernode-public-key` flag expects a path to the node's public key file. For development purposes, you can generate a private and public key pair using :code:`ssh-keygen -t ecdsa -b 384`. + flower-supernode + --root-certificates certificates/ca.crt + --superlink 127.0.0.1:9092 + --auth-supernode-private-key keys/client_credentials + --auth-supernode-public-key keys/client_credentials.pub +The ``--auth-supernode-private-key`` flag expects a path to the node's private key file +and the ``--auth-supernode-public-key`` flag expects a path to the node's public key +file. For development purposes, you can generate a private and public key pair using +``ssh-keygen -t ecdsa -b 384``. Security notice --------------- -The system's security relies on the credentials of the SuperLink and each SuperNode. Therefore, it is imperative to safeguard and safely store the credentials to avoid security risks such as Public Key Infrastructure (PKI) impersonation attacks. 
-The node authentication mechanism also involves human interaction, so please ensure that all of the communication is done in a secure manner, using trusted communication methods. - +The system's security relies on the credentials of the SuperLink and each SuperNode. +Therefore, it is imperative to safeguard and safely store the credentials to avoid +security risks such as Public Key Infrastructure (PKI) impersonation attacks. The node +authentication mechanism also involves human interaction, so please ensure that all of +the communication is done in a secure manner, using trusted communication methods. Conclusion ---------- -You should now have learned how to start a long-running Flower server (:code:`SuperLink`) and client (:code:`SuperNode`) with node authentication enabled. You should also know the significance of the private key and store it safely to minimize security risks. +You should now have learned how to start a long-running Flower server (``SuperLink``) +and client (``SuperNode``) with node authentication enabled. You should also know the +significance of the private key and store it safely to minimize security risks. diff --git a/doc/source/how-to-configure-clients.rst b/doc/source/how-to-configure-clients.rst index ff0a2f4033df..c950ab3be9e7 100644 --- a/doc/source/how-to-configure-clients.rst +++ b/doc/source/how-to-configure-clients.rst @@ -1,37 +1,55 @@ Configure clients ================= -Along with model parameters, Flower can send configuration values to clients. Configuration values can be used for various purposes. They are, for example, a popular way to control client-side hyperparameters from the server. +Along with model parameters, Flower can send configuration values to clients. +Configuration values can be used for various purposes. They are, for example, a popular +way to control client-side hyperparameters from the server. 
Configuration values -------------------- -Configuration values are represented as a dictionary with ``str`` keys and values of type ``bool``, ``bytes``, ``double`` (64-bit precision float), ``int``, or ``str`` (or equivalent types in different languages). Here is an example of a configuration dictionary in Python: +Configuration values are represented as a dictionary with ``str`` keys and values of +type ``bool``, ``bytes``, ``double`` (64-bit precision float), ``int``, or ``str`` (or +equivalent types in different languages). Here is an example of a configuration +dictionary in Python: .. code-block:: python config_dict = { - "dropout": True, # str key, bool value + "dropout": True, # str key, bool value "learning_rate": 0.01, # str key, float value - "batch_size": 32, # str key, int value - "optimizer": "sgd", # str key, str value + "batch_size": 32, # str key, int value + "optimizer": "sgd", # str key, str value } -Flower serializes these configuration dictionaries (or *config dict* for short) to their ProtoBuf representation, transports them to the client using gRPC, and then deserializes them back to Python dictionaries. +Flower serializes these configuration dictionaries (or *config dict* for short) to their +ProtoBuf representation, transports them to the client using gRPC, and then deserializes +them back to Python dictionaries. .. note:: - Currently, there is no support for directly sending collection types (e.g., ``Set``, ``List``, ``Map``) as values in configuration dictionaries. There are several workarounds to send collections as values by converting them to one of the supported value types (and converting them back on the client-side). - - One can, for example, convert a list of floating-point numbers to a JSON string, then send the JSON string using the configuration dictionary, and then convert the JSON string back to a list of floating-point numbers on the client. 
+ Currently, there is no support for directly sending collection types (e.g., ``Set``, + ``List``, ``Map``) as values in configuration dictionaries. There are several + workarounds to send collections as values by converting them to one of the supported + value types (and converting them back on the client-side). + One can, for example, convert a list of floating-point numbers to a JSON string, + then send the JSON string using the configuration dictionary, and then convert the + JSON string back to a list of floating-point numbers on the client. Configuration through built-in strategies ----------------------------------------- -The easiest way to send configuration values to clients is to use a built-in strategy like :code:`FedAvg`. Built-in strategies support so-called configuration functions. A configuration function is a function that the built-in strategy calls to get the configuration dictionary for the current round. It then forwards the configuration dictionary to all the clients selected during that round. +The easiest way to send configuration values to clients is to use a built-in strategy +like ``FedAvg``. Built-in strategies support so-called configuration functions. A +configuration function is a function that the built-in strategy calls to get the +configuration dictionary for the current round. It then forwards the configuration +dictionary to all the clients selected during that round. -Let's start with a simple example. Imagine we want to send (a) the batch size that the client should use, (b) the current global round of federated learning, and (c) the number of epochs to train on the client-side. Our configuration function could look like this: +Let's start with a simple example. Imagine we want to send (a) the batch size that the +client should use, (b) the current global round of federated learning, and (c) the +number of epochs to train on the client-side. Our configuration function could look like +this: .. 
code-block:: python @@ -44,12 +62,13 @@ Let's start with a simple example. Imagine we want to send (a) the batch size th } return config -To make the built-in strategies use this function, we can pass it to ``FedAvg`` during initialization using the parameter :code:`on_fit_config_fn`: +To make the built-in strategies use this function, we can pass it to ``FedAvg`` during +initialization using the parameter ``on_fit_config_fn``: .. code-block:: python strategy = FedAvg( - ..., # Other FedAvg parameters + ..., # Other FedAvg parameters on_fit_config_fn=fit_config, # The fit_config function we defined earlier ) @@ -64,9 +83,15 @@ One the client side, we receive the configuration dictionary in ``fit``: print(config["local_epochs"]) # Prints `2` # ... (rest of `fit` method) -There is also an `on_evaluate_config_fn` to configure evaluation, which works the same way. They are separate functions because one might want to send different configuration values to `evaluate` (for example, to use a different batch size). +There is also an `on_evaluate_config_fn` to configure evaluation, which works the same +way. They are separate functions because one might want to send different configuration +values to `evaluate` (for example, to use a different batch size). -The built-in strategies call this function every round (that is, every time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). Calling `on_evaluate_config_fn` every round allows us to vary/change the config dict over consecutive rounds. If we wanted to implement a hyperparameter schedule, for example, to increase the number of local epochs during later rounds, we could do the following: +The built-in strategies call this function every round (that is, every time +`Strategy.configure_fit` or `Strategy.configure_evaluate` runs). Calling +`on_evaluate_config_fn` every round allows us to vary/change the config dict over +consecutive rounds. 
If we wanted to implement a hyperparameter schedule, for example, to +increase the number of local epochs during later rounds, we could do the following: .. code-block:: python @@ -79,14 +104,19 @@ The built-in strategies call this function every round (that is, every time `Str } return config -The :code:`FedAvg` strategy will call this function *every round*. +The ``FedAvg`` strategy will call this function *every round*. Configuring individual clients ------------------------------ -In some cases, it is necessary to send different configuration values to different clients. +In some cases, it is necessary to send different configuration values to different +clients. -This can be achieved by customizing an existing strategy or by :doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes :code:`FedAvg` by adding a custom ``"hello": "world"`` configuration key/value pair to the config dict of a *single client* (only the first client in the list, the other clients in this round to not receive this "special" config value): +This can be achieved by customizing an existing strategy or by :doc:`implementing a +custom strategy from scratch `. Here's a nonsensical +example that customizes ``FedAvg`` by adding a custom ``"hello": "world"`` configuration +key/value pair to the config dict of a *single client* (only the first client in the +list, the other clients in this round do not receive this "special" config value): ..
code-block:: python @@ -94,7 +124,9 @@ This can be achieved by customizing an existing strategy or by :doc:`implementin def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager ) -> List[Tuple[ClientProxy, FitIns]]: - client_instructions = super().configure_fit(server_round, parameters, client_manager) + client_instructions = super().configure_fit( + server_round, parameters, client_manager + ) # Add special "hello": "world" config key/value pair, # but only to the first client in the list @@ -103,6 +135,7 @@ This can be achieved by customizing an existing strategy or by :doc:`implementin return client_instructions + # Create strategy and run server strategy = CustomClientConfigStrategy( # ... (same arguments as plain FedAvg here) diff --git a/doc/source/how-to-configure-logging.rst b/doc/source/how-to-configure-logging.rst index d5559429a73c..bb7461390b42 100644 --- a/doc/source/how-to-configure-logging.rst +++ b/doc/source/how-to-configure-logging.rst @@ -1,17 +1,19 @@ Configure logging ================= -The Flower logger keeps track of all core events that take place in federated learning workloads. -It presents information by default following a standard message format: +The Flower logger keeps track of all core events that take place in federated learning +workloads. It presents information by default following a standard message format: .. code-block:: python DEFAULT_FORMATTER = logging.Formatter( - "%(levelname)s %(name)s %(asctime)s | %(filename)s:%(lineno)d | %(message)s" + "%(levelname)s %(name)s %(asctime)s | %(filename)s:%(lineno)d | %(message)s" ) -containing relevant information including: log message level (e.g. :code:`INFO`, :code:`DEBUG`), a timestamp, the line where the logging took place from, as well as the log message itself. -In this way, the logger would typically display information on your terminal as follows: +containing relevant information including: log message level (e.g. 
``INFO``, ``DEBUG``), +a timestamp, the line where the logging took place from, as well as the log message +itself. In this way, the logger would typically display information on your terminal as +follows: .. code-block:: bash @@ -29,29 +31,35 @@ In this way, the logger would typically display information on your terminal as INFO flwr 2023-07-15 15:32:36,118 | server.py:125 | fit progress: (5, 358.6936808824539, {'accuracy': 0.3467}, 18.964264554999318) ... - Saving log to file -------------------- +------------------ -By default, the Flower log is outputted to the terminal where you launch your Federated Learning workload from. This applies for both gRPC-based federation (i.e. when you do :code:`fl.server.start_server`) and when using the :code:`VirtualClientEngine` (i.e. when you do :code:`fl.simulation.start_simulation`). -In some situations you might want to save this log to disk. You can do so by calling the `fl.common.logger.configure() `_ function. For example: +By default, the Flower log is outputted to the terminal where you launch your Federated +Learning workload from. This applies for both gRPC-based federation (i.e. when you do +``fl.server.start_server``) and when using the ``VirtualClientEngine`` (i.e. when you do +``fl.simulation.start_simulation``). In some situations you might want to save this log +to disk. You can do so by calling the `fl.common.logger.configure() +`_ function. For +example: .. code-block:: python - - import flwr as fl - - ... - # in your main file and before launching your experiment - # add an identifier to your logger - # then specify the name of the file where the log should be outputted to - fl.common.logger.configure(identifier="myFlowerExperiment", filename="log.txt") + import flwr as fl + + ... - # then start your workload - fl.simulation.start_simulation(...) # or fl.server.start_server(...) 
+ # in your main file and before launching your experiment + # add an identifier to your logger + # then specify the name of the file where the log should be outputted to + fl.common.logger.configure(identifier="myFlowerExperiment", filename="log.txt") -With the above, Flower will record the log you see on your terminal to :code:`log.txt`. This file will be created in the same directory as were you are running the code from. -If we inspect we see the log above is also recorded but prefixing with :code:`identifier` each line: + # then start your workload + fl.simulation.start_simulation(...) # or fl.server.start_server(...) + +With the above, Flower will record the log you see on your terminal to ``log.txt``. This +file will be created in the same directory as were you are running the code from. If we +inspect we see the log above is also recorded but prefixing with ``identifier`` each +line: .. code-block:: bash @@ -69,12 +77,11 @@ If we inspect we see the log above is also recorded but prefixing with :code:`id myFlowerExperiment | INFO flwr 2023-07-15 15:32:36,118 | server.py:125 | fit progress: (5, 358.6936808824539, {'accuracy': 0.3467}, 18.964264554999318) ... - Log your own messages --------------------- -You might expand the information shown by default with the Flower logger by adding more messages relevant to your application. -You can achieve this easily as follows. +You might expand the information shown by default with the Flower logger by adding more +messages relevant to your application. You can achieve this easily as follows. .. code-block:: python @@ -84,25 +91,31 @@ You can achieve this easily as follows. # For example, let's say you want to add to the log some info about the training on your client for debugging purposes + class FlowerClient(fl.client.NumPyClient): - def __init__(self, cid: int ...): + def __init__( + self, + cid: int, + # ... + ): self.cid = cid - self.net = ... - ... + self.net = net + # ... 
def fit(self, parameters, config): log(INFO, f"Printing a custom INFO message at the start of fit() :)") - + set_params(self.net, parameters) log(DEBUG, f"Client {self.cid} is doing fit() with config: {config}") - ... + # ... -In this way your logger will show, in addition to the default messages, the ones introduced by the clients as specified above. +In this way your logger will show, in addition to the default messages, the ones +introduced by the clients as specified above. .. code-block:: bash - + ... INFO flwr 2023-07-15 16:18:21,726 | server.py:89 | Initializing global parameters INFO flwr 2023-07-15 16:18:21,726 | server.py:276 | Requesting initial parameters from one random client @@ -123,10 +136,13 @@ In this way your logger will show, in addition to the default messages, the ones DEBUG flwr 2023-07-15 16:18:28,617 | main.py:63 | Client 13 is doing fit() with config: {'epochs': 5, 'batch_size': 64} ... - Log to a remote service ----------------------- -The :code:`fl.common.logger.configure` function, also allows specifying a host to which logs can be pushed (via :code:`POST`) through a native Python :code:`logging.handler.HTTPHandler`. -This is a particularly useful feature in :code:`gRPC`-based Federated Learning workloads where otherwise gathering logs from all entities (i.e. the server and the clients) might be cumbersome. -Note that in Flower simulation, the server automatically displays all logs. You can still specify a :code:`HTTPHandler` should you wish to backup or analyze the logs somewhere else. +The ``fl.common.logger.configure`` function, also allows specifying a host to which logs +can be pushed (via ``POST``) through a native Python ``logging.handler.HTTPHandler``. +This is a particularly useful feature in ``gRPC``-based Federated Learning workloads +where otherwise gathering logs from all entities (i.e. the server and the clients) might +be cumbersome. Note that in Flower simulation, the server automatically displays all +logs. 
You can still specify a ``HTTPHandler`` should you wish to backup or analyze the +logs somewhere else. diff --git a/doc/source/how-to-enable-ssl-connections.rst b/doc/source/how-to-enable-ssl-connections.rst index fc8e89914ac2..cd8590bc3436 100644 --- a/doc/source/how-to-enable-ssl-connections.rst +++ b/doc/source/how-to-enable-ssl-connections.rst @@ -1,44 +1,46 @@ Enable SSL connections ====================== -This guide describes how to a SSL-enabled secure Flower server (:code:`SuperLink`) can be started and -how a Flower client (:code:`SuperNode`) can establish a secure connections to it. +This guide describes how an SSL-enabled secure Flower server (``SuperLink``) can be +started and how a Flower client (``SuperNode``) can establish a secure connection to +it. -A complete code example demonstrating a secure connection can be found -`here `_. - -The code example comes with a :code:`README.md` file which explains how to start it. Although it is -already SSL-enabled, it might be less descriptive on how it does so. Stick to this guide for a deeper -introduction to the topic. +A complete code example demonstrating a secure connection can be found `here +`_. +The code example comes with a ``README.md`` file which explains how to start it. +Although it is already SSL-enabled, it might be less descriptive on how it does so. +Stick to this guide for a deeper introduction to the topic. Certificates ------------ -Using SSL-enabled connections requires certificates to be passed to the server and client. For -the purpose of this guide we are going to generate self-signed certificates. As this can become -quite complex we are going to ask you to run the script in -:code:`examples/advanced-tensorflow/certificates/generate.sh` -with the following command sequence: +Using SSL-enabled connections requires certificates to be passed to the server and +client. For the purpose of this guide we are going to generate self-signed certificates.
+As this can become quite complex we are going to ask you to run the script in +``examples/advanced-tensorflow/certificates/generate.sh`` with the following command +sequence: .. code-block:: bash - cd examples/advanced-tensorflow/certificates - ./generate.sh - -This will generate the certificates in :code:`examples/advanced-tensorflow/.cache/certificates`. + cd examples/advanced-tensorflow/certificates + ./generate.sh -The approach for generating SSL certificates in the context of this example can serve as an inspiration and -starting point, but it should not be used as a reference for production environments. Please refer to other -sources regarding the issue of correctly generating certificates for production environments. -For non-critical prototyping or research projects, it might be sufficient to use the self-signed certificates generated using -the scripts mentioned in this guide. +This will generate the certificates in +``examples/advanced-tensorflow/.cache/certificates``. +The approach for generating SSL certificates in the context of this example can serve as +an inspiration and starting point, but it should not be used as a reference for +production environments. Please refer to other sources regarding the issue of correctly +generating certificates for production environments. For non-critical prototyping or +research projects, it might be sufficient to use the self-signed certificates generated +using the scripts mentioned in this guide. Server (SuperLink) ------------------ -Use the following terminal command to start a sever (SuperLink) that uses the previously generated certificates: +Use the following terminal command to start a server (SuperLink) that uses the previously +generated certificates: ..
code-block:: bash @@ -47,34 +49,36 @@ Use the following terminal command to start a sever (SuperLink) that uses the pr --ssl-certfile certificates/server.pem --ssl-keyfile certificates/server.key -When providing certificates, the server expects a tuple of three certificates paths: CA certificate, server certificate and server private key. - +When providing certificates, the server expects a tuple of three certificates paths: CA +certificate, server certificate and server private key. Client (SuperNode) ------------------ -Use the following terminal command to start a client (SuperNode) that uses the previously generated certificates: +Use the following terminal command to start a client (SuperNode) that uses the +previously generated certificates: .. code-block:: bash - flower-supernode - --root-certificates certificates/ca.crt - --superlink 127.0.0.1:9092 - -When setting :code:`root_certificates`, the client expects a file path to PEM-encoded root certificates. + flower-supernode + --root-certificates certificates/ca.crt + --superlink 127.0.0.1:9092 +When setting ``root_certificates``, the client expects a file path to PEM-encoded root +certificates. Conclusion ---------- -You should now have learned how to generate self-signed certificates using the given script, start an -SSL-enabled server and have a client establish a secure connection to it. - +You should now have learned how to generate self-signed certificates using the given +script, start an SSL-enabled server and have a client establish a secure connection to +it. 
Additional resources -------------------- -These additional sources might be relevant if you would like to dive deeper into the topic of certificates: +These additional sources might be relevant if you would like to dive deeper into the +topic of certificates: -* `Let's Encrypt `_ -* `certbot `_ +- `Let's Encrypt `_ +- `certbot `_ diff --git a/doc/source/how-to-implement-strategies.rst b/doc/source/how-to-implement-strategies.rst index 01bbb3042973..075d8a0116c4 100644 --- a/doc/source/how-to-implement-strategies.rst +++ b/doc/source/how-to-implement-strategies.rst @@ -1,22 +1,21 @@ Implement strategies ==================== -The strategy abstraction enables implementation of fully custom strategies. A -strategy is basically the federated learning algorithm that runs on the server. -Strategies decide how to sample clients, how to configure clients for training, -how to aggregate updates, and how to evaluate models. Flower provides a few -built-in strategies which are based on the same API described below. +The strategy abstraction enables implementation of fully custom strategies. A strategy +is basically the federated learning algorithm that runs on the server. Strategies decide +how to sample clients, how to configure clients for training, how to aggregate updates, +and how to evaluate models. Flower provides a few built-in strategies which are based on +the same API described below. -The :code:`Strategy` abstraction --------------------------------- +The ``Strategy`` abstraction +---------------------------- All strategy implementation are derived from the abstract base class -:code:`flwr.server.strategy.Strategy`, both built-in implementations and third -party implementations. This means that custom strategy implementations have the -exact same capabilities at their disposal as built-in ones. +``flwr.server.strategy.Strategy``, both built-in implementations and third party +implementations. 
This means that custom strategy implementations have the exact same +capabilities at their disposal as built-in ones. -The strategy abstraction defines a few abstract methods that need to be -implemented: +The strategy abstraction defines a few abstract methods that need to be implemented: .. code-block:: python @@ -31,10 +30,7 @@ implemented: @abstractmethod def configure_fit( - self, - server_round: int, - parameters: Parameters, - client_manager: ClientManager + self, server_round: int, parameters: Parameters, client_manager: ClientManager ) -> List[Tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" @@ -49,10 +45,7 @@ implemented: @abstractmethod def configure_evaluate( - self, - server_round: int, - parameters: Parameters, - client_manager: ClientManager + self, server_round: int, parameters: Parameters, client_manager: ClientManager ) -> List[Tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" @@ -71,31 +64,35 @@ implemented: ) -> Optional[Tuple[float, Dict[str, Scalar]]]: """Evaluate the current model parameters.""" - -Creating a new strategy means implementing a new :code:`class` (derived from the -abstract base class :code:`Strategy`) that implements for the previously shown -abstract methods: +Creating a new strategy means implementing a new ``class`` (derived from the abstract +base class ``Strategy``) that implements for the previously shown abstract methods: .. 
code-block:: python class SotaStrategy(Strategy): def initialize_parameters(self, client_manager): # Your implementation here + pass def configure_fit(self, server_round, parameters, client_manager): # Your implementation here + pass def aggregate_fit(self, server_round, results, failures): # Your implementation here + pass def configure_evaluate(self, server_round, parameters, client_manager): # Your implementation here + pass def aggregate_evaluate(self, server_round, results, failures): # Your implementation here + pass def evaluate(self, parameters): # Your implementation here + pass The Flower server calls these methods in the following order: @@ -176,12 +173,15 @@ The Flower server calls these methods in the following order: The following sections describe each of those methods in more detail. -The :code:`initialize_parameters` method ----------------------------------------- +The ``initialize_parameters`` method +------------------------------------ -:code:`initialize_parameters` is called only once, at the very beginning of an execution. It is responsible for providing the initial global model parameters in a serialized form (i.e., as a :code:`Parameters` object). +``initialize_parameters`` is called only once, at the very beginning of an execution. It +is responsible for providing the initial global model parameters in a serialized form +(i.e., as a ``Parameters`` object). -Built-in strategies return user-provided initial parameters. The following example shows how initial parameters can be passed to :code:`FedAvg`: +Built-in strategies return user-provided initial parameters. The following example shows +how initial parameters can be passed to ``FedAvg``: .. code-block:: python @@ -200,49 +200,68 @@ Built-in strategies return user-provided initial parameters. 
The following examp # Serialize ndarrays to `Parameters` parameters = fl.common.ndarrays_to_parameters(weights) - # Use the serialized parameters as the initial global parameters + # Use the serialized parameters as the initial global parameters strategy = fl.server.strategy.FedAvg( initial_parameters=parameters, ) fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3), strategy=strategy) -The Flower server will call :code:`initialize_parameters`, which either returns the parameters that were passed to :code:`initial_parameters`, or :code:`None`. If no parameters are returned from :code:`initialize_parameters` (i.e., :code:`None`), the server will randomly select one client and ask it to provide its parameters. This is a convenience feature and not recommended in practice, but it can be useful for prototyping. In practice, it is recommended to always use server-side parameter initialization. +The Flower server will call ``initialize_parameters``, which either returns the +parameters that were passed to ``initial_parameters``, or ``None``. If no parameters are +returned from ``initialize_parameters`` (i.e., ``None``), the server will randomly +select one client and ask it to provide its parameters. This is a convenience feature +and not recommended in practice, but it can be useful for prototyping. In practice, it +is recommended to always use server-side parameter initialization. .. note:: - Server-side parameter initialization is a powerful mechanism. It can be used, for example, to resume training from a previously saved checkpoint. It is also the fundamental capability needed to implement hybrid approaches, for example, to fine-tune a pre-trained model using federated learning. + Server-side parameter initialization is a powerful mechanism. It can be used, for + example, to resume training from a previously saved checkpoint. 
It is also the + fundamental capability needed to implement hybrid approaches, for example, to + fine-tune a pre-trained model using federated learning. -The :code:`configure_fit` method --------------------------------- +The ``configure_fit`` method +---------------------------- -:code:`configure_fit` is responsible for configuring the upcoming round of training. What does *configure* mean in this context? Configuring a round means selecting clients and deciding what instructions to send to these clients. The signature of :code:`configure_fit` makes this clear: +``configure_fit`` is responsible for configuring the upcoming round of training. What +does *configure* mean in this context? Configuring a round means selecting clients and +deciding what instructions to send to these clients. The signature of ``configure_fit`` +makes this clear: .. code-block:: python @abstractmethod def configure_fit( - self, - server_round: int, - parameters: Parameters, - client_manager: ClientManager + self, server_round: int, parameters: Parameters, client_manager: ClientManager ) -> List[Tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" -The return value is a list of tuples, each representing the instructions that will be sent to a particular client. Strategy implementations usually perform the following steps in :code:`configure_fit`: +The return value is a list of tuples, each representing the instructions that will be +sent to a particular client. 
Strategy implementations usually perform the following +steps in ``configure_fit``: -* Use the :code:`client_manager` to randomly sample all (or a subset of) available clients (each represented as a :code:`ClientProxy` object) -* Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the current global model :code:`parameters` and :code:`config` dict +- Use the ``client_manager`` to randomly sample all (or a subset of) available clients + (each represented as a ``ClientProxy`` object) +- Pair each ``ClientProxy`` with the same ``FitIns`` holding the current global model + ``parameters`` and ``config`` dict -More sophisticated implementations can use :code:`configure_fit` to implement custom client selection logic. A client will only participate in a round if the corresponding :code:`ClientProxy` is included in the list returned from :code:`configure_fit`. +More sophisticated implementations can use ``configure_fit`` to implement custom client +selection logic. A client will only participate in a round if the corresponding +``ClientProxy`` is included in the list returned from ``configure_fit``. .. note:: - The structure of this return value provides a lot of flexibility to the user. Since instructions are defined on a per-client basis, different instructions can be sent to each client. This enables custom strategies to train, for example, different models on different clients, or use different hyperparameters on different clients (via the :code:`config` dict). + The structure of this return value provides a lot of flexibility to the user. Since + instructions are defined on a per-client basis, different instructions can be sent + to each client. This enables custom strategies to train, for example, different + models on different clients, or use different hyperparameters on different clients + (via the ``config`` dict). 
-The :code:`aggregate_fit` method --------------------------------- +The ``aggregate_fit`` method +---------------------------- -:code:`aggregate_fit` is responsible for aggregating the results returned by the clients that were selected and asked to train in :code:`configure_fit`. +``aggregate_fit`` is responsible for aggregating the results returned by the clients +that were selected and asked to train in ``configure_fit``. .. code-block:: python @@ -255,42 +274,58 @@ The :code:`aggregate_fit` method ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: """Aggregate training results.""" -Of course, failures can happen, so there is no guarantee that the server will get results from all the clients it sent instructions to (via :code:`configure_fit`). :code:`aggregate_fit` therefore receives a list of :code:`results`, but also a list of :code:`failures`. +Of course, failures can happen, so there is no guarantee that the server will get +results from all the clients it sent instructions to (via ``configure_fit``). +``aggregate_fit`` therefore receives a list of ``results``, but also a list of +``failures``. -:code:`aggregate_fit` returns an optional :code:`Parameters` object and a dictionary of aggregated metrics. The :code:`Parameters` return value is optional because :code:`aggregate_fit` might decide that the results provided are not sufficient for aggregation (e.g., too many failures). +``aggregate_fit`` returns an optional ``Parameters`` object and a dictionary of +aggregated metrics. The ``Parameters`` return value is optional because +``aggregate_fit`` might decide that the results provided are not sufficient for +aggregation (e.g., too many failures). -The :code:`configure_evaluate` method -------------------------------------- +The ``configure_evaluate`` method +--------------------------------- -:code:`configure_evaluate` is responsible for configuring the upcoming round of evaluation. What does *configure* mean in this context? 
Configuring a round means selecting clients and deciding what instructions to send to these clients. The signature of :code:`configure_evaluate` makes this clear: +``configure_evaluate`` is responsible for configuring the upcoming round of evaluation. +What does *configure* mean in this context? Configuring a round means selecting clients +and deciding what instructions to send to these clients. The signature of +``configure_evaluate`` makes this clear: .. code-block:: python @abstractmethod def configure_evaluate( - self, - server_round: int, - parameters: Parameters, - client_manager: ClientManager + self, server_round: int, parameters: Parameters, client_manager: ClientManager ) -> List[Tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" -The return value is a list of tuples, each representing the instructions that will be sent to a particular client. Strategy implementations usually perform the following steps in :code:`configure_evaluate`: +The return value is a list of tuples, each representing the instructions that will be +sent to a particular client. Strategy implementations usually perform the following +steps in ``configure_evaluate``: -* Use the :code:`client_manager` to randomly sample all (or a subset of) available clients (each represented as a :code:`ClientProxy` object) -* Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding the current global model :code:`parameters` and :code:`config` dict +- Use the ``client_manager`` to randomly sample all (or a subset of) available clients + (each represented as a ``ClientProxy`` object) +- Pair each ``ClientProxy`` with the same ``EvaluateIns`` holding the current global + model ``parameters`` and ``config`` dict -More sophisticated implementations can use :code:`configure_evaluate` to implement custom client selection logic. 
A client will only participate in a round if the corresponding :code:`ClientProxy` is included in the list returned from :code:`configure_evaluate`. +More sophisticated implementations can use ``configure_evaluate`` to implement custom +client selection logic. A client will only participate in a round if the corresponding +``ClientProxy`` is included in the list returned from ``configure_evaluate``. .. note:: - The structure of this return value provides a lot of flexibility to the user. Since instructions are defined on a per-client basis, different instructions can be sent to each client. This enables custom strategies to evaluate, for example, different models on different clients, or use different hyperparameters on different clients (via the :code:`config` dict). - + The structure of this return value provides a lot of flexibility to the user. Since + instructions are defined on a per-client basis, different instructions can be sent + to each client. This enables custom strategies to evaluate, for example, different + models on different clients, or use different hyperparameters on different clients + (via the ``config`` dict). -The :code:`aggregate_evaluate` method -------------------------------------- +The ``aggregate_evaluate`` method +--------------------------------- -:code:`aggregate_evaluate` is responsible for aggregating the results returned by the clients that were selected and asked to evaluate in :code:`configure_evaluate`. +``aggregate_evaluate`` is responsible for aggregating the results returned by the +clients that were selected and asked to evaluate in ``configure_evaluate``. .. code-block:: python @@ -303,21 +338,29 @@ The :code:`aggregate_evaluate` method ) -> Tuple[Optional[float], Dict[str, Scalar]]: """Aggregate evaluation results.""" -Of course, failures can happen, so there is no guarantee that the server will get results from all the clients it sent instructions to (via :code:`configure_evaluate`). 
:code:`aggregate_evaluate` therefore receives a list of :code:`results`, but also a list of :code:`failures`. +Of course, failures can happen, so there is no guarantee that the server will get +results from all the clients it sent instructions to (via ``configure_evaluate``). +``aggregate_evaluate`` therefore receives a list of ``results``, but also a list of +``failures``. -:code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a dictionary of aggregated metrics. The :code:`float` return value is optional because :code:`aggregate_evaluate` might decide that the results provided are not sufficient for aggregation (e.g., too many failures). +``aggregate_evaluate`` returns an optional ``float`` (loss) and a dictionary of +aggregated metrics. The ``float`` return value is optional because +``aggregate_evaluate`` might decide that the results provided are not sufficient for +aggregation (e.g., too many failures). -The :code:`evaluate` method ---------------------------- +The ``evaluate`` method +----------------------- -:code:`evaluate` is responsible for evaluating model parameters on the server-side. Having :code:`evaluate` in addition to :code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies to perform both servers-side and client-side (federated) evaluation. +``evaluate`` is responsible for evaluating model parameters on the server-side. Having +``evaluate`` in addition to ``configure_evaluate``/``aggregate_evaluate`` enables +strategies to perform both server-side and client-side (federated) evaluation. ..
code-block:: python @abstractmethod - def evaluate( - self, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + def evaluate(self, parameters: Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]: """Evaluate the current model parameters.""" -The return value is again optional because the strategy might not need to implement server-side evaluation or because the user-defined :code:`evaluate` method might not complete successfully (e.g., it might fail to load the server-side evaluation data). +The return value is again optional because the strategy might not need to implement +server-side evaluation or because the user-defined ``evaluate`` method might not +complete successfully (e.g., it might fail to load the server-side evaluation data). diff --git a/doc/source/how-to-install-flower.rst b/doc/source/how-to-install-flower.rst index a621377c8ce6..89cdf8b836cf 100644 --- a/doc/source/how-to-install-flower.rst +++ b/doc/source/how-to-install-flower.rst @@ -1,12 +1,11 @@ Install Flower ============== - Python version -------------- -Flower requires at least `Python 3.9 `_, but `Python 3.10 `_ or above is recommended. - +Flower requires at least `Python 3.9 `_, but `Python 3.10 +`_ or above is recommended. Install stable release ---------------------- @@ -14,45 +13,56 @@ Install stable release Using pip ~~~~~~~~~ -Stable releases are available on `PyPI `_:: +Stable releases are available on `PyPI `_: + +:: - python -m pip install flwr + python -m pip install flwr -For simulations that use the Virtual Client Engine, ``flwr`` should be installed with the ``simulation`` extra:: +For simulations that use the Virtual Client Engine, ``flwr`` should be installed with +the ``simulation`` extra: - python -m pip install "flwr[simulation]" +:: + python -m pip install "flwr[simulation]" Using conda (or mamba) ~~~~~~~~~~~~~~~~~~~~~~ Flower can also be installed from the ``conda-forge`` channel. 
-If you have not added ``conda-forge`` to your channels, you will first need to run the following:: +If you have not added ``conda-forge`` to your channels, you will first need to run the +following: + +:: + + conda config --add channels conda-forge + conda config --set channel_priority strict - conda config --add channels conda-forge - conda config --set channel_priority strict +Once the ``conda-forge`` channel has been enabled, ``flwr`` can be installed with +``conda``: -Once the ``conda-forge`` channel has been enabled, ``flwr`` can be installed with ``conda``:: +:: - conda install flwr + conda install flwr -or with ``mamba``:: +or with ``mamba``: - mamba install flwr +:: + mamba install flwr Verify installation ------------------- -The following command can be used to verify if Flower was successfully installed. If everything worked, it should print the version of Flower to the command line: +The following command can be used to verify if Flower was successfully installed. If +everything worked, it should print the version of Flower to the command line: .. 
code-block:: bash - :substitutions: - - python -c "import flwr;print(flwr.__version__)" - |stable_flwr_version| + :substitutions: + python -c "import flwr;print(flwr.__version__)" + |stable_flwr_version| Advanced installation options ----------------------------- @@ -65,21 +75,32 @@ Install via Docker Install pre-release ~~~~~~~~~~~~~~~~~~~ -New (possibly unstable) versions of Flower are sometimes available as pre-release versions (alpha, beta, release candidate) before the stable release happens:: +New (possibly unstable) versions of Flower are sometimes available as pre-release +versions (alpha, beta, release candidate) before the stable release happens: + +:: + + python -m pip install -U --pre flwr - python -m pip install -U --pre flwr +For simulations that use the Virtual Client Engine, ``flwr`` pre-releases should be +installed with the ``simulation`` extra: -For simulations that use the Virtual Client Engine, ``flwr`` pre-releases should be installed with the ``simulation`` extra:: +:: - python -m pip install -U --pre 'flwr[simulation]' + python -m pip install -U --pre 'flwr[simulation]' Install nightly release ~~~~~~~~~~~~~~~~~~~~~~~ -The latest (potentially unstable) changes in Flower are available as nightly releases:: +The latest (potentially unstable) changes in Flower are available as nightly releases: + +:: + + python -m pip install -U flwr-nightly - python -m pip install -U flwr-nightly +For simulations that use the Virtual Client Engine, ``flwr-nightly`` should be installed +with the ``simulation`` extra: -For simulations that use the Virtual Client Engine, ``flwr-nightly`` should be installed with the ``simulation`` extra:: +:: - python -m pip install -U flwr-nightly[simulation] + python -m pip install -U flwr-nightly[simulation] diff --git a/doc/source/how-to-monitor-simulation.rst b/doc/source/how-to-monitor-simulation.rst index f6c26a701d94..f540e22a6a77 100644 --- a/doc/source/how-to-monitor-simulation.rst +++ 
b/doc/source/how-to-monitor-simulation.rst @@ -1,109 +1,120 @@ Monitor simulation ================== -Flower allows you to monitor system resources while running your simulation. Moreover, the Flower simulation engine is powerful and enables you to decide how to allocate resources per client manner and constrain the total usage. Insights from resource consumption can help you make smarter decisions and speed up the execution time. - -The specific instructions assume you are using macOS and have the `Homebrew `_ package manager installed. +Flower allows you to monitor system resources while running your simulation. Moreover, +the Flower simulation engine is powerful and enables you to decide how to allocate +resources in a per-client manner and constrain the total usage. Insights from resource +consumption can help you make smarter decisions and speed up the execution time. +The specific instructions assume you are using macOS and have the `Homebrew +`_ package manager installed. Downloads --------- .. code-block:: bash - brew install prometheus grafana + brew install prometheus grafana -`Prometheus `_ is used for data collection, while `Grafana `_ will enable you to visualize the collected data. They are both well integrated with `Ray `_ which Flower uses under the hood. +`Prometheus `_ is used for data collection, while `Grafana +`_ will enable you to visualize the collected data. They are both +well integrated with `Ray `_ which Flower uses under the hood. -Overwrite the configuration files (depending on your device, it might be installed on a different path). +Overwrite the configuration files (depending on your device, it might be installed on a +different path). If you are on an M1 Mac, it should be: .. code-block:: bash - /opt/homebrew/etc/prometheus.yml - /opt/homebrew/etc/grafana/grafana.ini + /opt/homebrew/etc/prometheus.yml + /opt/homebrew/etc/grafana/grafana.ini On the previous generation Intel Mac devices, it should be: ..
code-block:: bash - /usr/local/etc/prometheus.yml - /usr/local/etc/grafana/grafana.ini + /usr/local/etc/prometheus.yml + /usr/local/etc/grafana/grafana.ini -Open the respective configuration files and change them. Depending on your device, use one of the two following commands: +Open the respective configuration files and change them. Depending on your device, use +one of the two following commands: .. code-block:: bash - # M1 macOS - open /opt/homebrew/etc/prometheus.yml + # M1 macOS + open /opt/homebrew/etc/prometheus.yml - # Intel macOS - open /usr/local/etc/prometheus.yml + # Intel macOS + open /usr/local/etc/prometheus.yml -and then delete all the text in the file and paste a new Prometheus config you see below. You may adjust the time intervals to your requirements: +and then delete all the text in the file and paste a new Prometheus config you see +below. You may adjust the time intervals to your requirements: .. code-block:: bash - global: - scrape_interval: 1s - evaluation_interval: 1s + global: + scrape_interval: 1s + evaluation_interval: 1s - scrape_configs: - # Scrape from each ray node as defined in the service_discovery.json provided by ray. - - job_name: 'ray' - file_sd_configs: - - files: - - '/tmp/ray/prom_metrics_service_discovery.json' + scrape_configs: + # Scrape from each ray node as defined in the service_discovery.json provided by ray. + - job_name: 'ray' + file_sd_configs: + - files: + - '/tmp/ray/prom_metrics_service_discovery.json' -Now after you have edited the Prometheus configuration, do the same with the Grafana configuration files. Open those using one of the following commands as before: +Now after you have edited the Prometheus configuration, do the same with the Grafana +configuration files. Open those using one of the following commands as before: .. 
code-block:: python - # M1 macOS - open /opt/homebrew/etc/grafana/grafana.ini + # M1 macOS + open /opt/homebrew/etc/grafana/grafana.ini - # Intel macOS - open /usr/local/etc/grafana/grafana.ini + # Intel macOS + open /usr/local/etc/grafana/grafana.ini -Your terminal editor should open and allow you to apply the following configuration as before. +Your terminal editor should open and allow you to apply the following configuration as +before. .. code-block:: bash - [security] - allow_embedding = true - - [auth.anonymous] - enabled = true - org_name = Main Org. - org_role = Viewer + [security] + allow_embedding = true - [paths] - provisioning = /tmp/ray/session_latest/metrics/grafana/provisioning + [auth.anonymous] + enabled = true + org_name = Main Org. + org_role = Viewer -Congratulations, you just downloaded all the necessary software needed for metrics tracking. Now, let’s start it. + [paths] + provisioning = /tmp/ray/session_latest/metrics/grafana/provisioning +Congratulations, you just downloaded all the necessary software needed for metrics +tracking. Now, let’s start it. Tracking metrics ---------------- -Before running your Flower simulation, you have to start the monitoring tools you have just installed and configured. +Before running your Flower simulation, you have to start the monitoring tools you have +just installed and configured. .. code-block:: bash - brew services start prometheus - brew services start grafana + brew services start prometheus + brew services start grafana Please include the following argument in your Python code when starting a simulation. .. code-block:: python - fl.simulation.start_simulation( - # ... - # all the args you used before - # ... - ray_init_args = {"include_dashboard": True} - ) + fl.simulation.start_simulation( + # ... + # all the args you used before + # ... + ray_init_args={"include_dashboard": True} + ) Now, you are ready to start your workload.
@@ -111,126 +122,140 @@ Shortly after the simulation starts, you should see the following logs in your t .. code-block:: bash - 2023-01-20 16:22:58,620 INFO [worker.py:1529](http://worker.py:1529/) -- Started a local Ray instance. View the dashboard at http://127.0.0.1:8265 - + 2023-01-20 16:22:58,620 INFO [worker.py:1529](http://worker.py:1529/) -- Started a local Ray instance. View the dashboard at http://127.0.0.1:8265 -You can look at everything at ``_ . +You can look at everything at http://127.0.0.1:8265 . -It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the lowest option). +It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the lowest +option). -Or alternatively, you can just see them in Grafana by clicking on the right-up corner, “View in Grafana”. Please note that the Ray dashboard is only accessible during the simulation. After the simulation ends, you can only use Grafana to explore the metrics. You can start Grafana by going to ``http://localhost:3000/``. +Or alternatively, you can just see them in Grafana by clicking on the right-up corner, +“View in Grafana”. Please note that the Ray dashboard is only accessible during the +simulation. After the simulation ends, you can only use Grafana to explore the metrics. +You can start Grafana by going to ``http://localhost:3000/``. -After you finish the visualization, stop Prometheus and Grafana. This is important as they will otherwise block, for example port :code:`3000` on your machine as long as they are running. +After you finish the visualization, stop Prometheus and Grafana. This is important as +they will otherwise block, for example port ``3000`` on your machine as long as they are +running. .. 
code-block:: bash - brew services stop prometheus - brew services stop grafana - + brew services stop prometheus + brew services stop grafana Resource allocation ------------------- -You must understand how the Ray library works to efficiently allocate system resources to simulation clients on your own. +You must understand how the Ray library works to efficiently allocate system resources +to simulation clients on your own. -Initially, the simulation (which Ray handles under the hood) starts by default with all the available resources on the system, which it shares among the clients. It doesn't mean it divides it equally among all of them, nor that the model training happens at all of them simultaneously. You will learn more about that in the later part of this blog. You can check the system resources by running the following: +Initially, the simulation (which Ray handles under the hood) starts by default with all +the available resources on the system, which it shares among the clients. It doesn't +mean it divides it equally among all of them, nor that the model training happens at all +of them simultaneously. You will learn more about that in the later part of this blog. +You can check the system resources by running the following: .. code-block:: python - import ray - ray.available_resources() + import ray + + ray.available_resources() In Google Colab, the result you see might be similar to this: .. code-block:: bash - {'memory': 8020104807.0, - 'GPU': 1.0, - 'object_store_memory': 4010052403.0, - 'CPU': 2.0, - 'accelerator_type:T4': 1.0, - 'node:172.28.0.2': 1.0} + {'memory': 8020104807.0, + 'GPU': 1.0, + 'object_store_memory': 4010052403.0, + 'CPU': 2.0, + 'accelerator_type:T4': 1.0, + 'node:172.28.0.2': 1.0} - -However, you can overwrite the defaults. When starting a simulation, do the following (you don't need to overwrite all of them): +However, you can overwrite the defaults. 
When starting a simulation, do the following +(you don't need to overwrite all of them): .. code-block:: python - num_cpus = 2 - num_gpus = 1 - ram_memory = 16_000 * 1024 * 1024 # 16 GB - fl.simulation.start_simulation( - # ... - # all the args you were specifying before - # ... - ray_init_args = { - "include_dashboard": True, # we need this one for tracking - "num_cpus": num_cpus, - "num_gpus": num_gpus, - "memory": ram_memory, - } - ) - + num_cpus = 2 + num_gpus = 1 + ram_memory = 16_000 * 1024 * 1024 # 16 GB + fl.simulation.start_simulation( + # ... + # all the args you were specifying before + # ... + ray_init_args={ + "include_dashboard": True, # we need this one for tracking + "num_cpus": num_cpus, + "num_gpus": num_gpus, + "memory": ram_memory, + } + ) Let’s also specify the resource for a single client. .. code-block:: python - # Total resources for simulation - num_cpus = 4 - num_gpus = 1 - ram_memory = 16_000 * 1024 * 1024 # 16 GB - - # Single client resources - client_num_cpus = 2 - client_num_gpus = 1 - - fl.simulation.start_simulation( - # ... - # all the args you were specifying before - # ... - ray_init_args = { - "include_dashboard": True, # we need this one for tracking - "num_cpus": num_cpus, - "num_gpus": num_gpus, - "memory": ram_memory, - }, - # The argument below is new - client_resources = { - "num_cpus": client_num_cpus, - "num_gpus": client_num_gpus, - } - ) - -Now comes the crucial part. Ray will start a new client only when it has all the required resources (such that they run in parallel) when the resources allow. - -In the example above, only one client will be run, so your clients won't run concurrently. Setting :code:`client_num_gpus = 0.5` would allow running two clients and therefore enable them to run concurrently. -Be careful not to require more resources than available. If you specified :code:`client_num_gpus = 2`, the simulation wouldn't start (even if you had 2 GPUs but decided to set 1 in :code:`ray_init_args`). 
- + # Total resources for simulation + num_cpus = 4 + num_gpus = 1 + ram_memory = 16_000 * 1024 * 1024 # 16 GB + + # Single client resources + client_num_cpus = 2 + client_num_gpus = 1 + + fl.simulation.start_simulation( + # ... + # all the args you were specifying before + # ... + ray_init_args={ + "include_dashboard": True, # we need this one for tracking + "num_cpus": num_cpus, + "num_gpus": num_gpus, + "memory": ram_memory, + }, + # The argument below is new + client_resources={ + "num_cpus": client_num_cpus, + "num_gpus": client_num_gpus, + }, + ) + +Now comes the crucial part. Ray will start a new client only when it has all the +required resources (such that they run in parallel) when the resources allow. + +In the example above, only one client will be run, so your clients won't run +concurrently. Setting ``client_num_gpus = 0.5`` would allow running two clients and +therefore enable them to run concurrently. Be careful not to require more resources than +available. If you specified ``client_num_gpus = 2``, the simulation wouldn't start (even +if you had 2 GPUs but decided to set 1 in ``ray_init_args``). FAQ --- Q: I don't see any metrics logged. -A: The timeframe might not be properly set. The setting is in the top right corner ("Last 30 minutes" by default). Please change the timeframe to reflect the period when the simulation was running. +A: The timeframe might not be properly set. The setting is in the top right corner +("Last 30 minutes" by default). Please change the timeframe to reflect the period when +the simulation was running. -Q: I see “Grafana server not detected. Please make sure the Grafana server is running and refresh this page” after going to the Metrics tab in Ray Dashboard. +Q: I see “Grafana server not detected. Please make sure the Grafana server is running +and refresh this page” after going to the Metrics tab in Ray Dashboard. A: You probably don't have Grafana running. Please check the running services .. 
code-block:: bash - brew services list + brew services list -Q: I see "This site can't be reached" when going to ``_. +Q: I see "This site can't be reached" when going to http://127.0.0.1:8265. A: Either the simulation has already finished, or you still need to start Prometheus. - Resources --------- -Ray Dashboard: ``_ +Ray Dashboard: https://docs.ray.io/en/latest/ray-observability/getting-started.html -Ray Metrics: ``_ +Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html diff --git a/doc/source/how-to-run-simulations.rst b/doc/source/how-to-run-simulations.rst index d1dcb511ed51..fb4eed17b4e7 100644 --- a/doc/source/how-to-run-simulations.rst +++ b/doc/source/how-to-run-simulations.rst @@ -1,48 +1,85 @@ Run simulations =============== -.. youtube:: cRebUIGB5RU - :url_parameters: ?list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB - :width: 100% - -Simulating Federated Learning workloads is useful for a multitude of use-cases: you might want to run your workload on a large cohort of clients but without having to source, configure and mange a large number of physical devices; you might want to run your FL workloads as fast as possible on the compute systems you have access to without having to go through a complex setup process; you might want to validate your algorithm on different scenarios at varying levels of data and system heterogeneity, client availability, privacy budgets, etc. These are among some of the use-cases where simulating FL workloads makes sense. Flower can accommodate these scenarios by means of its `VirtualClientEngine `_ or VCE. - -The :code:`VirtualClientEngine` schedules, launches and manages `virtual` clients. These clients are identical to `non-virtual` clients (i.e. the ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by creating a class inheriting, for example, from `flwr.client.NumPyClient `_ and therefore behave in an identical way. 
In addition to that, clients managed by the :code:`VirtualClientEngine` are: - -* resource-aware: this means that each client gets assigned a portion of the compute and memory on your system. You as a user can control this at the beginning of the simulation and allows you to control the degree of parallelism of your Flower FL simulation. The fewer the resources per client, the more clients can run concurrently on the same hardware. -* self-managed: this means that you as a user do not need to launch clients manually, instead this gets delegated to :code:`VirtualClientEngine`'s internals. -* ephemeral: this means that a client is only materialized when it is required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards, releasing the resources it was assigned and allowing in this way other clients to participate. - -The :code:`VirtualClientEngine` implements `virtual` clients using `Ray `_, an open-source framework for scalable Python workloads. In particular, Flower's :code:`VirtualClientEngine` makes use of `Actors `_ to spawn `virtual` clients and run their workload. - +.. youtube:: cRebUIGB5RU + :url_parameters: ?list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB + :width: 100% + +Simulating Federated Learning workloads is useful for a multitude of use-cases: you +might want to run your workload on a large cohort of clients but without having to +source, configure and manage a large number of physical devices; you might want to run +your FL workloads as fast as possible on the compute systems you have access to without +having to go through a complex setup process; you might want to validate your algorithm +on different scenarios at varying levels of data and system heterogeneity, client +availability, privacy budgets, etc. These are among some of the use-cases where +simulating FL workloads makes sense. Flower can accommodate these scenarios by means of +its `VirtualClientEngine +`_ or VCE.
+ +The ``VirtualClientEngine`` schedules, launches and manages `virtual` clients. These +clients are identical to `non-virtual` clients (i.e. the ones you launch via the command +`flwr.client.start_client `_) in the sense that they can +be configure by creating a class inheriting, for example, from `flwr.client.NumPyClient +`_ and therefore behave in an identical way. +In addition to that, clients managed by the ``VirtualClientEngine`` are: + +- resource-aware: this means that each client gets assigned a portion of the compute and + memory on your system. You as a user can control this at the beginning of the + simulation and allows you to control the degree of parallelism of your Flower FL + simulation. The fewer the resources per client, the more clients can run concurrently + on the same hardware. +- self-managed: this means that you as a user do not need to launch clients manually, + instead this gets delegated to ``VirtualClientEngine``'s internals. +- ephemeral: this means that a client is only materialized when it is required in the FL + process (e.g. to do `fit() `_). The object + is destroyed afterwards, releasing the resources it was assigned and allowing in this + way other clients to participate. + +The ``VirtualClientEngine`` implements `virtual` clients using `Ray +`_, an open-source framework for scalable Python workloads. In +particular, Flower's ``VirtualClientEngine`` makes use of `Actors +`_ to spawn `virtual` clients and +run their workload. Launch your Flower simulation ----------------------------- -Running Flower simulations still require you to define your client class, a strategy, and utility functions to download and load (and potentially partition) your dataset. 
With that out of the way, launching your simulation is done with `start_simulation `_ and a minimal example looks as follows: - +Running Flower simulations still require you to define your client class, a strategy, +and utility functions to download and load (and potentially partition) your dataset. +With that out of the way, launching your simulation is done with `start_simulation +`_ and a minimal example looks as +follows: .. code-block:: python import flwr as fl from flwr.server.strategy import FedAvg - + + def client_fn(cid: str): # Return a standard Flower client return MyFlowerClient().to_client() + # Launch the simulation hist = fl.simulation.start_simulation( - client_fn=client_fn, # A function to run a _virtual_ client when required - num_clients=50, # Total number of clients available - config=fl.server.ServerConfig(num_rounds=3), # Specify number of FL rounds - strategy=FedAvg() # A Flower strategy + client_fn=client_fn, # A function to run a _virtual_ client when required + num_clients=50, # Total number of clients available + config=fl.server.ServerConfig(num_rounds=3), # Specify number of FL rounds + strategy=FedAvg(), # A Flower strategy ) - VirtualClientEngine resources ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -By default the VCE has access to all system resources (i.e. all CPUs, all GPUs, etc) since that is also the default behavior when starting Ray. However, in some settings you might want to limit how many of your system resources are used for simulation. You can do this via the :code:`ray_init_args` input argument to :code:`start_simulation` which the VCE internally passes to Ray's :code:`ray.init` command. For a complete list of settings you can configure check the `ray.init `_ documentation. Do not set :code:`ray_init_args` if you want the VCE to use all your system's CPUs and GPUs. + +By default the VCE has access to all system resources (i.e. all CPUs, all GPUs, etc) +since that is also the default behavior when starting Ray. 
However, in some settings you +might want to limit how many of your system resources are used for simulation. You can +do this via the ``ray_init_args`` input argument to ``start_simulation`` which the VCE +internally passes to Ray's ``ray.init`` command. For a complete list of settings you can +configure check the `ray.init +`_ documentation. +Do not set ``ray_init_args`` if you want the VCE to use all your system's CPUs and GPUs. .. code-block:: python @@ -50,22 +87,28 @@ By default the VCE has access to all system resources (i.e. all CPUs, all GPUs, # Launch the simulation by limiting resources visible to Flower's VCE hist = fl.simulation.start_simulation( - ... + # ... # Out of all CPUs and GPUs available in your system, # only 8xCPUs and 1xGPUs would be used for simulation. - ray_init_args = {'num_cpus': 8, 'num_gpus': 1} + ray_init_args={"num_cpus": 8, "num_gpus": 1} ) - - Assigning client resources ~~~~~~~~~~~~~~~~~~~~~~~~~~ -By default the :code:`VirtualClientEngine` assigns a single CPU core (and nothing else) to each virtual client. This means that if your system has 10 cores, that many virtual clients can be concurrently running. -More often than not, you would probably like to adjust the resources your clients get assigned based on the complexity (i.e. compute and memory footprint) of your FL workload. You can do so when starting your simulation by setting the argument `client_resources` to `start_simulation `_. Two keys are internally used by Ray to schedule and spawn workloads (in our case Flower clients): +By default the ``VirtualClientEngine`` assigns a single CPU core (and nothing else) to +each virtual client. This means that if your system has 10 cores, that many virtual +clients can be concurrently running. -* :code:`num_cpus` indicates the number of CPU cores a client would get. -* :code:`num_gpus` indicates the **ratio** of GPU memory a client gets assigned. 
+More often than not, you would probably like to adjust the resources your clients get +assigned based on the complexity (i.e. compute and memory footprint) of your FL +workload. You can do so when starting your simulation by setting the argument +`client_resources` to `start_simulation +`_. Two keys are internally used by +Ray to schedule and spawn workloads (in our case Flower clients): + +- ``num_cpus`` indicates the number of CPU cores a client would get. +- ``num_gpus`` indicates the **ratio** of GPU memory a client gets assigned. Let's see a few examples: @@ -74,90 +117,140 @@ Let's see a few examples: import flwr as fl # each client gets 1xCPU (this is the default if no resources are specified) - my_client_resources = {'num_cpus': 1, 'num_gpus': 0.0} + my_client_resources = {"num_cpus": 1, "num_gpus": 0.0} # each client gets 2xCPUs and half a GPU. (with a single GPU, 2 clients run concurrently) - my_client_resources = {'num_cpus': 2, 'num_gpus': 0.5} + my_client_resources = {"num_cpus": 2, "num_gpus": 0.5} # 10 client can run concurrently on a single GPU, but only if you have 20 CPU threads. - my_client_resources = {'num_cpus': 2, 'num_gpus': 0.1} + my_client_resources = {"num_cpus": 2, "num_gpus": 0.1} # Launch the simulation hist = fl.simulation.start_simulation( - ... - client_resources = my_client_resources # A Python dict specifying CPU/GPU resources + # ... + client_resources=my_client_resources # A Python dict specifying CPU/GPU resources ) -While the :code:`client_resources` can be used to control the degree of concurrency in your FL simulation, this does not stop you from running dozens, hundreds or even thousands of clients in the same round and having orders of magnitude more `dormant` (i.e. not participating in a round) clients. Let's say you want to have 100 clients per round but your system can only accommodate 8 clients concurrently. 
The :code:`VirtualClientEngine` will schedule 100 jobs to run (each simulating a client sampled by the strategy) and then will execute them in a resource-aware manner in batches of 8. +While the ``client_resources`` can be used to control the degree of concurrency in your +FL simulation, this does not stop you from running dozens, hundreds or even thousands of +clients in the same round and having orders of magnitude more `dormant` (i.e. not +participating in a round) clients. Let's say you want to have 100 clients per round but +your system can only accommodate 8 clients concurrently. The ``VirtualClientEngine`` +will schedule 100 jobs to run (each simulating a client sampled by the strategy) and +then will execute them in a resource-aware manner in batches of 8. -To understand all the intricate details on how resources are used to schedule FL clients and how to define custom resources, please take a look at the `Ray documentation `_. +To understand all the intricate details on how resources are used to schedule FL clients +and how to define custom resources, please take a look at the `Ray documentation +`_. Simulation examples ~~~~~~~~~~~~~~~~~~~ -A few ready-to-run complete examples for Flower simulation in Tensorflow/Keras and PyTorch are provided in the `Flower repository `_. You can run them on Google Colab too: - -* `Tensorflow/Keras Simulation `_: 100 clients collaboratively train a MLP model on MNIST. -* `PyTorch Simulation `_: 100 clients collaboratively train a CNN model on MNIST. - +A few ready-to-run complete examples for Flower simulation in Tensorflow/Keras and +PyTorch are provided in the `Flower repository `_. You +can run them on Google Colab too: +- `Tensorflow/Keras Simulation + `_: 100 + clients collaboratively train a MLP model on MNIST. +- `PyTorch Simulation + `_: 100 clients + collaboratively train a CNN model on MNIST. 
Multi-node Flower simulations ----------------------------- -Flower's :code:`VirtualClientEngine` allows you to run FL simulations across multiple compute nodes. Before starting your multi-node simulation ensure that you: - -#. Have the same Python environment in all nodes. -#. Have a copy of your code (e.g. your entire repo) in all nodes. -#. Have a copy of your dataset in all nodes (more about this in :ref:`simulation considerations `) -#. Pass :code:`ray_init_args={"address"="auto"}` to `start_simulation `_ so the :code:`VirtualClientEngine` attaches to a running Ray instance. -#. Start Ray on you head node: on the terminal type :code:`ray start --head`. This command will print a few lines, one of which indicates how to attach other nodes to the head node. -#. Attach other nodes to the head node: copy the command shown after starting the head and execute it on terminal of a new node: for example :code:`ray start --address='192.168.1.132:6379'` - -With all the above done, you can run your code from the head node as you would if the simulation was running on a single node. - -Once your simulation is finished, if you'd like to dismantle your cluster you simply need to run the command :code:`ray stop` in each node's terminal (including the head node). +Flower's ``VirtualClientEngine`` allows you to run FL simulations across multiple +compute nodes. Before starting your multi-node simulation ensure that you: + +1. Have the same Python environment in all nodes. +2. Have a copy of your code (e.g. your entire repo) in all nodes. +3. Have a copy of your dataset in all nodes (more about this in :ref:`simulation + considerations `) +4. Pass ``ray_init_args={"address": "auto"}`` to `start_simulation + `_ so the ``VirtualClientEngine`` + attaches to a running Ray instance. +5. Start Ray on your head node: on the terminal type ``ray start --head``. This command + will print a few lines, one of which indicates how to attach other nodes to the head + node. +6. 
Attach other nodes to the head node: copy the command shown after starting the head + and execute it on terminal of a new node: for example ``ray start + --address='192.168.1.132:6379'`` + +With all the above done, you can run your code from the head node as you would if the +simulation was running on a single node. + +Once your simulation is finished, if you'd like to dismantle your cluster you simply +need to run the command ``ray stop`` in each node's terminal (including the head node). Multi-node simulation good-to-know ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Here we list a few interesting functionality when running multi-node FL simulations: -User :code:`ray status` to check all nodes connected to your head node as well as the total resources available to the :code:`VirtualClientEngine`. +Use ``ray status`` to check all nodes connected to your head node as well as the total +resources available to the ``VirtualClientEngine``. -When attaching a new node to the head, all its resources (i.e. all CPUs, all GPUs) will be visible by the head node. This means that the :code:`VirtualClientEngine` can schedule as many `virtual` clients as that node can possible run. In some settings you might want to exclude certain resources from the simulation. You can do this by appending `--num-cpus=` and/or `--num-gpus=` in any :code:`ray start` command (including when starting the head) +When attaching a new node to the head, all its resources (i.e. all CPUs, all GPUs) will +be visible by the head node. This means that the ``VirtualClientEngine`` can schedule as +many `virtual` clients as that node can possibly run. In some settings you might want to +exclude certain resources from the simulation. You can do this by appending +`--num-cpus=` and/or `--num-gpus=` in any ``ray +start`` command (including when starting the head) .. _considerations-for-simulations: - Considerations for simulations ------------------------------ ..
note:: - We are actively working on these fronts so to make it trivial to run any FL workload with Flower simulation. + We are actively working on these fronts so to make it trivial to run any FL workload + with Flower simulation. -The current VCE allows you to run Federated Learning workloads in simulation mode whether you are prototyping simple scenarios on your personal laptop or you want to train a complex FL pipeline across multiple high-performance GPU nodes. While we add more capabilities to the VCE, the points below highlight some of the considerations to keep in mind when designing your FL pipeline with Flower. We also highlight a couple of current limitations in our implementation. +The current VCE allows you to run Federated Learning workloads in simulation mode +whether you are prototyping simple scenarios on your personal laptop or you want to +train a complex FL pipeline across multiple high-performance GPU nodes. While we add +more capabilities to the VCE, the points below highlight some of the considerations to +keep in mind when designing your FL pipeline with Flower. We also highlight a couple of +current limitations in our implementation. GPU resources ~~~~~~~~~~~~~ -The VCE assigns a share of GPU memory to a client that specifies the key :code:`num_gpus` in :code:`client_resources`. This being said, Ray (used internally by the VCE) is by default: - - -* not aware of the total VRAM available on the GPUs. This means that if you set :code:`num_gpus=0.5` and you have two GPUs in your system with different (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients concurrently. -* not aware of other unrelated (i.e. not created by the VCE) workloads are running on the GPU. Two takeaways from this are: +The VCE assigns a share of GPU memory to a client that specifies the key ``num_gpus`` in +``client_resources``. 
This being said, Ray (used internally by the VCE) is by default: - * Your Flower server might need a GPU to evaluate the `global model` after aggregation (by instance when making use of the `evaluate method `_) - * If you want to run several independent Flower simulations on the same machine you need to mask-out your GPUs with :code:`CUDA_VISIBLE_DEVICES=""` when launching your experiment. +- not aware of the total VRAM available on the GPUs. This means that if you set + ``num_gpus=0.5`` and you have two GPUs in your system with different (e.g. 32GB and + 8GB) VRAM amounts, they both would run 2 clients concurrently. +- not aware of other unrelated (i.e. not created by the VCE) workloads running on + the GPU. Two takeaways from this are: + - Your Flower server might need a GPU to evaluate the `global model` after aggregation + (for instance when making use of the `evaluate method + `_) + - If you want to run several independent Flower simulations on the same machine you + need to mask-out your GPUs with ``CUDA_VISIBLE_DEVICES=""`` when launching + your experiment. -In addition, the GPU resource limits passed to :code:`client_resources` are not `enforced` (i.e. they can be exceeded) which can result in the situation of client using more VRAM than the ratio specified when starting the simulation. +In addition, the GPU resource limits passed to ``client_resources`` are not `enforced` +(i.e. they can be exceeded) which can result in the situation of a client using more VRAM +than the ratio specified when starting the simulation. TensorFlow with GPUs -"""""""""""""""""""" +++++++++++++++++++++ -When `using a GPU with TensorFlow `_ nearly your entire GPU memory of all your GPUs visible to the process will be mapped. This is done by TensorFlow for optimization purposes. However, in settings such as FL simulations where we want to split the GPU into multiple `virtual` clients, this is not a desirable mechanism. 
Luckily we can disable this default behavior by `enabling memory growth `_. +When `using a GPU with TensorFlow `_ nearly your +entire GPU memory of all your GPUs visible to the process will be mapped. This is done +by TensorFlow for optimization purposes. However, in settings such as FL simulations +where we want to split the GPU into multiple `virtual` clients, this is not a desirable +mechanism. Luckily we can disable this default behavior by `enabling memory growth +`_. -This would need to be done in the main process (which is where the server would run) and in each Actor created by the VCE. By means of :code:`actor_kwargs` we can pass the reserved key `"on_actor_init_fn"` in order to specify a function to be executed upon actor initialization. In this case, to enable GPU growth for TF workloads. It would look as follows: +This would need to be done in the main process (which is where the server would run) and +in each Actor created by the VCE. By means of ``actor_kwargs`` we can pass the reserved +key `"on_actor_init_fn"` in order to specify a function to be executed upon actor +initialization. In this case, to enable GPU growth for TF workloads. It would look as +follows: .. code-block:: python @@ -170,19 +263,29 @@ This would need to be done in the main process (which is where the server would # Start Flower simulation hist = fl.simulation.start_simulation( - ... + # ... actor_kwargs={ - "on_actor_init_fn": enable_tf_gpu_growth # <-- To be executed upon actor init. + "on_actor_init_fn": enable_tf_gpu_growth # <-- To be executed upon actor init. }, ) -This is precisely the mechanism used in `Tensorflow/Keras Simulation `_ example. - +This is precisely the mechanism used in `Tensorflow/Keras Simulation +`_ example. Multi-node setups ~~~~~~~~~~~~~~~~~ -* The VCE does not currently offer a way to control on which node a particular `virtual` client is executed. 
In other words, if more than a single node have the resources needed by a client to run, then any of those nodes could get the client workload scheduled onto. Later in the FL process (i.e. in a different round) the same client could be executed by a different node. Depending on how your clients access their datasets, this might require either having a copy of all dataset partitions on all nodes or a dataset serving mechanism (e.g. using nfs, a database) to circumvent data duplication. - -* By definition virtual clients are `stateless` due to their ephemeral nature. A client state can be implemented as part of the Flower client class but users need to ensure this saved to persistent storage (e.g. a database, disk) and that can be retrieve later by the same client regardless on which node it is running from. This is related to the point above also since, in some way, the client's dataset could be seen as a type of `state`. - +- The VCE does not currently offer a way to control on which node a particular `virtual` + client is executed. In other words, if more than a single node have the resources + needed by a client to run, then any of those nodes could get the client workload + scheduled onto. Later in the FL process (i.e. in a different round) the same client + could be executed by a different node. Depending on how your clients access their + datasets, this might require either having a copy of all dataset partitions on all + nodes or a dataset serving mechanism (e.g. using nfs, a database) to circumvent data + duplication. +- By definition virtual clients are `stateless` due to their ephemeral nature. A client + state can be implemented as part of the Flower client class but users need to ensure + this saved to persistent storage (e.g. a database, disk) and that can be retrieve + later by the same client regardless on which node it is running from. 
This is related + to the point above also since, in some way, the client's dataset could be seen as a + type of `state`. diff --git a/doc/source/how-to-save-and-load-model-checkpoints.rst b/doc/source/how-to-save-and-load-model-checkpoints.rst index 0d711e375cd8..f2f12dae97be 100644 --- a/doc/source/how-to-save-and-load-model-checkpoints.rst +++ b/doc/source/how-to-save-and-load-model-checkpoints.rst @@ -1,17 +1,19 @@ Save and load model checkpoints =============================== -Flower does not automatically save model updates on the server-side. This how-to guide describes the steps to save (and load) model checkpoints in Flower. - +Flower does not automatically save model updates on the server-side. This how-to guide +describes the steps to save (and load) model checkpoints in Flower. Model checkpointing ------------------- -Model updates can be persisted on the server-side by customizing :code:`Strategy` methods. -Implementing custom strategies is always an option, but for many cases it may be more convenient to simply customize an existing strategy. -The following code example defines a new :code:`SaveModelStrategy` which customized the existing built-in :code:`FedAvg` strategy. -In particular, it customizes :code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class (:code:`FedAvg`). -It then continues to save returned (aggregated) weights before it returns those aggregated weights to the caller (i.e., the server): +Model updates can be persisted on the server-side by customizing ``Strategy`` methods. +Implementing custom strategies is always an option, but for many cases it may be more +convenient to simply customize an existing strategy. The following code example defines +a new ``SaveModelStrategy`` which customized the existing built-in ``FedAvg`` strategy. +In particular, it customizes ``aggregate_fit`` by calling ``aggregate_fit`` in the base +class (``FedAvg``). 
It then continues to save returned (aggregated) weights before it +returns those aggregated weights to the caller (i.e., the server): .. code-block:: python @@ -24,11 +26,15 @@ It then continues to save returned (aggregated) weights before it returns those ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: # Call aggregate_fit from base class (FedAvg) to aggregate parameters and metrics - aggregated_parameters, aggregated_metrics = super().aggregate_fit(server_round, results, failures) - + aggregated_parameters, aggregated_metrics = super().aggregate_fit( + server_round, results, failures + ) + if aggregated_parameters is not None: # Convert `Parameters` to `List[np.ndarray]` - aggregated_ndarrays: List[np.ndarray] = fl.common.parameters_to_ndarrays(aggregated_parameters) + aggregated_ndarrays: List[np.ndarray] = fl.common.parameters_to_ndarrays( + aggregated_parameters + ) # Save aggregated_ndarrays print(f"Saving round {server_round} aggregated_ndarrays...") @@ -36,24 +42,27 @@ It then continues to save returned (aggregated) weights before it returns those return aggregated_parameters, aggregated_metrics + # Create strategy and run server strategy = SaveModelStrategy( # (same arguments as FedAvg here) ) fl.server.start_server(strategy=strategy) - Save and load PyTorch checkpoints --------------------------------- -Similar to the previous example but with a few extra steps, we'll show how to -store a PyTorch checkpoint we'll use the ``torch.save`` function. -Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be transformed into a list of NumPy ``ndarray``'s, -then those are transformed into the PyTorch ``state_dict`` following the ``OrderedDict`` class structure. +Similar to the previous example but with a few extra steps, we'll show how to store a +PyTorch checkpoint we'll use the ``torch.save`` function. 
Firstly, ``aggregate_fit`` +returns a ``Parameters`` object that has to be transformed into a list of NumPy +``ndarray``'s, then those are transformed into the PyTorch ``state_dict`` following the +``OrderedDict`` class structure. .. code-block:: python net = cifar.Net().to(DEVICE) + + class SaveModelStrategy(fl.server.strategy.FedAvg): def aggregate_fit( self, @@ -64,14 +73,18 @@ then those are transformed into the PyTorch ``state_dict`` following the ``Order """Aggregate model weights using weighted average and store checkpoint""" # Call aggregate_fit from base class (FedAvg) to aggregate parameters and metrics - aggregated_parameters, aggregated_metrics = super().aggregate_fit(server_round, results, failures) - + aggregated_parameters, aggregated_metrics = super().aggregate_fit( + server_round, results, failures + ) + if aggregated_parameters is not None: print(f"Saving round {server_round} aggregated_parameters...") # Convert `Parameters` to `List[np.ndarray]` - aggregated_ndarrays: List[np.ndarray] = fl.common.parameters_to_ndarrays(aggregated_parameters) - + aggregated_ndarrays: List[np.ndarray] = fl.common.parameters_to_ndarrays( + aggregated_parameters + ) + # Convert `List[np.ndarray]` to PyTorch`state_dict` params_dict = zip(net.state_dict().keys(), aggregated_ndarrays) state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) @@ -82,7 +95,8 @@ then those are transformed into the PyTorch ``state_dict`` following the ``Order return aggregated_parameters, aggregated_metrics -To load your progress, you simply append the following lines to your code. Note that this will iterate over all saved checkpoints and load the latest one: +To load your progress, you simply append the following lines to your code. Note that +this will iterate over all saved checkpoints and load the latest one: .. code-block:: python @@ -94,4 +108,5 @@ To load your progress, you simply append the following lines to your code. 
Note state_dict_ndarrays = [v.cpu().numpy() for v in net.state_dict().values()] parameters = fl.common.ndarrays_to_parameters(state_dict_ndarrays) -Return/use this object of type ``Parameters`` wherever necessary, such as in the ``initial_parameters`` when defining a ``Strategy``. \ No newline at end of file +Return/use this object of type ``Parameters`` wherever necessary, such as in the +``initial_parameters`` when defining a ``Strategy``. diff --git a/doc/source/how-to-upgrade-to-flower-1.0.rst b/doc/source/how-to-upgrade-to-flower-1.0.rst index c0721b0f3736..5f10f16a551f 100644 --- a/doc/source/how-to-upgrade-to-flower-1.0.rst +++ b/doc/source/how-to-upgrade-to-flower-1.0.rst @@ -1,8 +1,10 @@ Upgrade to Flower 1.0 ===================== -Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable foundation for future growth. Compared to Flower 0.19 (and other 0.x series releases), there are a few breaking changes that make it necessary to change the code of existing 0.x-series projects. - +Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable foundation for +future growth. Compared to Flower 0.19 (and other 0.x series releases), there are a few +breaking changes that make it necessary to change the code of existing 0.x-series +projects. Install update -------------- @@ -14,11 +16,13 @@ Here's how to update an existing installation to Flower 1.0 using either pip or - ``python -m pip install -U flwr`` (when using ``start_server`` and ``start_client``) - ``python -m pip install -U 'flwr[simulation]'`` (when using ``start_simulation``) -- Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before running ``poetry install``). +- Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall (don't + forget to delete ``poetry.lock`` via ``rm poetry.lock`` before running ``poetry + install``). 
- ``flwr = "^1.0.0"`` (when using ``start_server`` and ``start_client``) - - ``flwr = { version = "^1.0.0", extras = ["simulation"] }`` (when using ``start_simulation``) - + - ``flwr = { version = "^1.0.0", extras = ["simulation"] }`` (when using + ``start_simulation``) Required changes ---------------- @@ -28,64 +32,96 @@ The following breaking changes require manual updates. General ~~~~~~~ -Pass all arguments as keyword arguments (not as positional arguments). Here's an example: +Pass all arguments as keyword arguments (not as positional arguments). Here's an +example: - Flower 0.19 (positional arguments): ``start_client("127.0.0.1:8080", FlowerClient())`` -- Flower 1.0 (keyword arguments): ``start_client(server_address="127.0.0.1:8080", client=FlowerClient())`` +- Flower 1.0 (keyword arguments): ``start_client(server_address="127.0.0.1:8080", + client=FlowerClient())`` Client ~~~~~~ -- Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to ``def get_parameters(self, config):`` -- Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def get_parameters(self, ins: GetParametersIns):`` +- Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to ``def + get_parameters(self, config):`` +- Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def + get_parameters(self, ins: GetParametersIns):`` Strategies / ``start_server`` / ``start_simulation`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and ``start_simulation``. Here's an example: +- Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and + ``start_simulation``. 
Here's an example: - - Flower 0.19: ``start_server(..., config={"num_rounds": 3, "round_timeout": 600.0}, ...)`` - - Flower 1.0: ``start_server(..., config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), ...)`` + - Flower 0.19: ``start_server(..., config={"num_rounds": 3, "round_timeout": 600.0}, + ...)`` + - Flower 1.0: ``start_server(..., config=flwr.server.ServerConfig(num_rounds=3, + round_timeout=600.0), ...)`` -- Replace ``num_rounds=1`` in ``start_simulation`` with the new ``config=ServerConfig(...)`` (see previous item) -- Remove ``force_final_distributed_eval`` parameter from calls to ``start_server``. Distributed evaluation on all clients can be enabled by configuring the strategy to sample all clients for evaluation after the last round of training. +- Replace ``num_rounds=1`` in ``start_simulation`` with the new + ``config=ServerConfig(...)`` (see previous item) +- Remove ``force_final_distributed_eval`` parameter from calls to ``start_server``. + Distributed evaluation on all clients can be enabled by configuring the strategy to + sample all clients for evaluation after the last round of training. - Rename parameter/ndarray conversion functions: - ``parameters_to_weights`` --> ``parameters_to_ndarrays`` - ``weights_to_parameters`` --> ``ndarrays_to_parameters`` -- Strategy initialization: if the strategy relies on the default values for ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and ``fraction_evaluate`` manually to ``0.1``. Projects that do not manually create a strategy (by calling ``start_server`` or ``start_simulation`` without passing a strategy instance) should now manually initialize FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``. +- Strategy initialization: if the strategy relies on the default values for + ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and + ``fraction_evaluate`` manually to ``0.1``. 
Projects that do not manually create a + strategy (by calling ``start_server`` or ``start_simulation`` without passing a + strategy instance) should now manually initialize FedAvg with ``fraction_fit`` and + ``fraction_evaluate`` set to ``0.1``. - Rename built-in strategy parameters (e.g., ``FedAvg``): - ``fraction_eval`` --> ``fraction_evaluate`` - ``min_eval_clients`` --> ``min_evaluate_clients`` - ``eval_fn`` --> ``evaluate_fn`` -- Rename ``rnd`` to ``server_round``. This impacts multiple methods and functions, for example, ``configure_fit``, ``aggregate_fit``, ``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``. +- Rename ``rnd`` to ``server_round``. This impacts multiple methods and functions, for + example, ``configure_fit``, ``aggregate_fit``, ``configure_evaluate``, + ``aggregate_evaluate``, and ``evaluate_fn``. - Add ``server_round`` and ``config`` to ``evaluate_fn``: - - Flower 0.19: ``def evaluate(parameters: NDArrays) -> Optional[Tuple[float, Dict[str, Scalar]]]:`` - - Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, Scalar]]]:`` + - Flower 0.19: ``def evaluate(parameters: NDArrays) -> Optional[Tuple[float, Dict[str, + Scalar]]]:`` + - Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, config: + Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, Scalar]]]:`` Custom strategies ~~~~~~~~~~~~~~~~~ -- The type of parameter ``failures`` has changed from ``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], BaseException]]`` (in ``aggregate_fit``) and ``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in ``aggregate_evaluate``) -- The ``Strategy`` method ``evaluate`` now receives the current round of federated learning/evaluation as the first parameter: +- The type of parameter ``failures`` has changed from ``List[BaseException]`` to + ``List[Union[Tuple[ClientProxy, FitRes], BaseException]]`` (in ``aggregate_fit``) 
and + ``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in + ``aggregate_evaluate``) +- The ``Strategy`` method ``evaluate`` now receives the current round of federated + learning/evaluation as the first parameter: - - Flower 0.19: ``def evaluate(self, parameters: Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:`` - - Flower 1.0: ``def evaluate(self, server_round: int, parameters: Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:`` + - Flower 0.19: ``def evaluate(self, parameters: Parameters) -> Optional[Tuple[float, + Dict[str, Scalar]]]:`` + - Flower 1.0: ``def evaluate(self, server_round: int, parameters: Parameters) -> + Optional[Tuple[float, Dict[str, Scalar]]]:`` Optional improvements --------------------- -Along with the necessary changes above, there are a number of potential improvements that just became possible: - -- Remove "placeholder" methods from subclasses of ``Client`` or ``NumPyClient``. If you, for example, use server-side evaluation, then empty placeholder implementations of ``evaluate`` are no longer necessary. -- Configure the round timeout via ``start_simulation``: ``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), ...)`` +Along with the necessary changes above, there are a number of potential improvements +that just became possible: +- Remove "placeholder" methods from subclasses of ``Client`` or ``NumPyClient``. If you, + for example, use server-side evaluation, then empty placeholder implementations of + ``evaluate`` are no longer necessary. +- Configure the round timeout via ``start_simulation``: ``start_simulation(..., + config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), ...)`` Further help ------------ -Most official `Flower code examples `_ are already updated to Flower 1.0, they can serve as a reference for using the Flower 1.0 API. If there are further questions, `join the Flower Slack `_ and use the channel ``#questions``. 
+Most official `Flower code examples +`_ are already updated to Flower 1.0, +they can serve as a reference for using the Flower 1.0 API. If there are further +questions, `join the Flower Slack `_ and use the channel +``#questions``. diff --git a/doc/source/how-to-upgrade-to-flower-next.rst b/doc/source/how-to-upgrade-to-flower-next.rst index f378e92dbba4..e1fc350deb8b 100644 --- a/doc/source/how-to-upgrade-to-flower-next.rst +++ b/doc/source/how-to-upgrade-to-flower-next.rst @@ -1,11 +1,13 @@ Upgrade to Flower Next ====================== -Welcome to the migration guide for updating Flower to Flower Next! Whether you're a seasoned user -or just getting started, this guide will help you smoothly transition your existing setup to take -advantage of the latest features and improvements in Flower Next, starting from version 1.8. +Welcome to the migration guide for updating Flower to Flower Next! Whether you're a +seasoned user or just getting started, this guide will help you smoothly transition your +existing setup to take advantage of the latest features and improvements in Flower Next, +starting from version 1.8. .. note:: + This guide shows how to reuse pre-``1.8`` Flower code with minimum code changes by using the *compatibility layer* in Flower Next. In another guide, we will show how to run Flower Next end-to-end with pure Flower Next APIs. @@ -18,26 +20,44 @@ Let's dive in! - https://github.com/jgm/pandoc/issues/3973#issuecomment-337087394 .. |clientapp_link| replace:: ``ClientApp()`` + .. |serverapp_link| replace:: ``ServerApp()`` + .. |startclient_link| replace:: ``start_client()`` + .. |startserver_link| replace:: ``start_server()`` + .. |startsim_link| replace:: ``start_simulation()`` + .. |runsim_link| replace:: ``run_simulation()`` + .. |flowernext_superlink_link| replace:: ``flower-superlink`` + .. |flowernext_clientapp_link| replace:: ``flower-client-app`` + .. |flowernext_serverapp_link| replace:: ``flower-server-app`` + .. 
|flower_simulation_link| replace:: ``flower-simulation`` + .. _clientapp_link: ref-api/flwr.client.ClientApp.html + +.. _flower_simulation_link: ref-api-cli.html#flower-simulation + +.. _flowernext_clientapp_link: ref-api-cli.html#flower-client-app + +.. _flowernext_serverapp_link: ref-api-cli.html#flower-server-app + +.. _flowernext_superlink_link: ref-api-cli.html#flower-superlink + +.. _runsim_link: ref-api/flwr.simulation.run_simulation.html + .. _serverapp_link: ref-api/flwr.server.ServerApp.html + .. _startclient_link: ref-api/flwr.client.start_client.html + .. _startserver_link: ref-api/flwr.server.start_server.html -.. _startsim_link: ref-api/flwr.simulation.start_simulation.html -.. _runsim_link: ref-api/flwr.simulation.run_simulation.html -.. _flowernext_superlink_link: ref-api-cli.html#flower-superlink -.. _flowernext_clientapp_link: ref-api-cli.html#flower-client-app -.. _flowernext_serverapp_link: ref-api-cli.html#flower-server-app -.. _flower_simulation_link: ref-api-cli.html#flower-simulation +.. _startsim_link: ref-api/flwr.simulation.start_simulation.html Install update -------------- @@ -48,19 +68,18 @@ Using pip Here's how to update an existing installation of Flower to Flower Next with ``pip``: .. code-block:: bash - + $ python -m pip install -U flwr or if you need Flower Next with simulation: .. code-block:: bash - - $ python -m pip install -U "flwr[simulation]" + $ python -m pip install -U "flwr[simulation]" Ensure you set the following version constraint in your ``requirements.txt`` -.. code-block:: +.. code-block:: # Without simulation support flwr>=1.8,<2.0 @@ -81,7 +100,8 @@ or ``pyproject.toml``: Using Poetry ~~~~~~~~~~~~ -Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before running ``poetry install``). 
+Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall (don't forget to +delete ``poetry.lock`` via ``rm poetry.lock`` before running ``poetry install``). Ensure you set the following version constraint in your ``pyproject.toml``: @@ -100,13 +120,16 @@ Required changes ---------------- In Flower Next, the *infrastructure* and *application layers* have been decoupled. -Instead of starting a client in code via ``start_client()``, you create a |clientapp_link|_ and start it via the command line. -Instead of starting a server in code via ``start_server()``, you create a |serverapp_link|_ and start it via the command line. -The long-running components of server and client are called SuperLink and SuperNode. -The following non-breaking changes that require manual updates and allow you to run your project both in the traditional way and in the Flower Next way: +Instead of starting a client in code via ``start_client()``, you create a +|clientapp_link|_ and start it via the command line. Instead of starting a server in +code via ``start_server()``, you create a |serverapp_link|_ and start it via the command +line. The long-running components of server and client are called SuperLink and +SuperNode. The following non-breaking changes require manual updates and allow you +to run your project both in the traditional way and in the Flower Next way: |clientapp_link|_ ~~~~~~~~~~~~~~~~~ + - Wrap your existing client with |clientapp_link|_ instead of launching it via |startclient_link|_. 
Here's an example: @@ -115,23 +138,25 @@ The following non-breaking changes that require manual updates and allow you to # Flower 1.8 def client_fn(cid: str): - return flwr.client.FlowerClient().to_client() - + return flwr.client.FlowerClient().to_client() + + app = flwr.client.ClientApp( - client_fn=client_fn, + client_fn=client_fn, ) # Flower 1.7 if __name__ == "__main__": flwr.client.start_client( - server_address="127.0.0.1:8080", - client=flwr.client.FlowerClient().to_client(), + server_address="127.0.0.1:8080", + client=flwr.client.FlowerClient().to_client(), ) |serverapp_link|_ ~~~~~~~~~~~~~~~~~ -- Wrap your existing strategy with |serverapp_link|_ instead of starting the server - via |startserver_link|_. Here's an example: + +- Wrap your existing strategy with |serverapp_link|_ instead of starting the server via + |startserver_link|_. Here's an example: .. code-block:: python :emphasize-lines: 2,9 @@ -152,13 +177,14 @@ The following non-breaking changes that require manual updates and allow you to Deployment ~~~~~~~~~~ + - Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, in sequence, - |flowernext_clientapp_link|_ (2x) and |flowernext_serverapp_link|_. There is no need to - execute `client.py` and `server.py` as Python scripts. + |flowernext_clientapp_link|_ (2x) and |flowernext_serverapp_link|_. There is no need + to execute `client.py` and `server.py` as Python scripts. - Here's an example to start the server without HTTPS (only for prototyping): .. code-block:: bash - + # Start a Superlink $ flower-superlink --insecure @@ -171,8 +197,9 @@ Deployment # In yet another terminal window, run the ServerApp (this starts the actual training run) $ flower-server-app server:app --insecure -- Here's another example to start with HTTPS. Use the ``--ssl-ca-certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line - options to pass paths to (CA certificate, server certificate, and server private key). 
+- Here's another example to start with HTTPS. Use the ``--ssl-ca-certfile``, + ``--ssl-certfile``, and ``--ssl-keyfile`` command line options to pass paths to (CA + certificate, server certificate, and server private key). .. code-block:: bash @@ -199,6 +226,7 @@ Deployment Simulation in CLI ~~~~~~~~~~~~~~~~~ + - Wrap your existing client and strategy with |clientapp_link|_ and |serverapp_link|_, respectively. There is no need to use |startsim_link|_ anymore. Here's an example: @@ -208,13 +236,16 @@ Simulation in CLI # Regular Flower client implementation class FlowerClient(NumPyClient): # ... + pass + # Flower 1.8 def client_fn(cid: str): - return FlowerClient().to_client() - + return FlowerClient().to_client() + + client_app = flwr.client.ClientApp( - client_fn=client_fn, + client_fn=client_fn, ) server_app = flwr.server.ServerApp( @@ -226,12 +257,12 @@ Simulation in CLI if __name__ == "__main__": hist = flwr.simulation.start_simulation( num_clients=100, - ... + # ... ) -- Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / ``client_app`` object in the - code instead of executing the Python script. Here's an example (assuming the - ``server_app`` and ``client_app`` objects are in a ``sim.py`` module): +- Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / ``client_app`` + object in the code instead of executing the Python script. Here's an example (assuming + the ``server_app`` and ``client_app`` objects are in a ``sim.py`` module): .. code-block:: bash @@ -246,8 +277,8 @@ Simulation in CLI # Flower 1.7 $ python sim.py -- Set default resources for each |clientapp_link|_ using the ``--backend-config`` command - line argument instead of setting the ``client_resources`` argument in +- Set default resources for each |clientapp_link|_ using the ``--backend-config`` + command line argument instead of setting the ``client_resources`` argument in |startsim_link|_. Here's an example: .. 
code-block:: bash @@ -266,26 +297,27 @@ Simulation in CLI # Flower 1.7 (in `sim.py`) if __name__ == "__main__": hist = flwr.simulation.start_simulation( - num_clients=100, - client_resources = {'num_cpus': 2, "num_gpus": 0.25}, - ... + num_clients=100, client_resources={"num_cpus": 2, "num_gpus": 0.25}, ... ) Simulation in a Notebook ~~~~~~~~~~~~~~~~~~~~~~~~ + - Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's an example: .. code-block:: python :emphasize-lines: 19,27 - NUM_CLIENTS = + NUM_CLIENTS = 10 # Replace by any integer greater than zero + def client_fn(cid: str): # ... - return FlowerClient().to_client() - + return FlowerClient().to_client() + + client_app = flwr.client.ClientApp( - client_fn=client_fn, + client_fn=client_fn, ) server_app = flwr.server.ServerApp( @@ -297,7 +329,7 @@ Simulation in a Notebook # Flower 1.8 flwr.simulation.run_simulation( - server_app=server_app, + server_app=server_app, client_app=client_app, num_supernodes=NUM_CLIENTS, backend_config=backend_config, @@ -312,18 +344,17 @@ Simulation in a Notebook client_resources=backend_config["client_resources"], ) - Further help ------------ Some official `Flower code examples `_ are already -updated to Flower Next so they can serve as a reference for using the Flower Next API. If there are -further questions, `join the Flower Slack `_ and use the channel ``#questions``. -You can also `participate in Flower Discuss `_ where you can find us -answering questions, or share and learn from others about migrating to Flower Next. +updated to Flower Next so they can serve as a reference for using the Flower Next API. +If there are further questions, `join the Flower Slack `_ +and use the channel ``#questions``. You can also `participate in Flower Discuss +`_ where you can find us answering questions, or share and +learn from others about migrating to Flower Next. .. 
admonition:: Important - :class: important As we continuously enhance Flower Next at a rapid pace, we'll be periodically updating this guide. Please feel free to share any feedback with us! diff --git a/doc/source/how-to-use-built-in-mods.rst b/doc/source/how-to-use-built-in-mods.rst index 341139175074..970b2055ec23 100644 --- a/doc/source/how-to-use-built-in-mods.rst +++ b/doc/source/how-to-use-built-in-mods.rst @@ -1,14 +1,19 @@ Use Built-in Mods ================= -**Note: This tutorial covers experimental features. The functionality and interfaces may change in future versions.** +**Note: This tutorial covers experimental features. The functionality and interfaces may +change in future versions.** -In this tutorial, we will learn how to utilize built-in mods to augment the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) allow us to perform operations before and after a task is processed in the ``ClientApp``. +In this tutorial, we will learn how to utilize built-in mods to augment the behavior of +a ``ClientApp``. Mods (sometimes also called Modifiers) allow us to perform operations +before and after a task is processed in the ``ClientApp``. What are Mods? -------------- -A Mod is a callable that wraps around a ``ClientApp``. It can manipulate or inspect the incoming ``Message`` and the resulting outgoing ``Message``. The signature for a ``Mod`` is as follows: +A Mod is a callable that wraps around a ``ClientApp``. It can manipulate or inspect the +incoming ``Message`` and the resulting outgoing ``Message``. The signature for a ``Mod`` +is as follows: .. code-block:: python @@ -51,12 +56,13 @@ Define your client function (``client_fn``) that will be wrapped by the mod(s): def client_fn(cid): # Your client code goes here. - return # your client + return # your client 3. 
Create the ``ClientApp`` with mods -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Create your ``ClientApp`` and pass the mods as a list to the ``mods`` argument. The order in which you provide the mods matters: +Create your ``ClientApp`` and pass the mods as a list to the ``mods`` argument. The +order in which you provide the mods matters: .. code-block:: python @@ -65,25 +71,31 @@ Create your ``ClientApp`` and pass the mods as a list to the ``mods`` argument. mods=[ example_mod_1, # Mod 1 example_mod_2, # Mod 2 - ] + ], ) Order of execution ------------------ -When the ``ClientApp`` runs, the mods are executed in the order they are provided in the list: +When the ``ClientApp`` runs, the mods are executed in the order they are provided in the +list: 1. ``example_mod_1`` (outermost mod) 2. ``example_mod_2`` (next mod) -3. Message handler (core function that handles the incoming ``Message`` and returns the outgoing ``Message``) +3. Message handler (core function that handles the incoming ``Message`` and returns the + outgoing ``Message``) 4. ``example_mod_2`` (on the way back) 5. ``example_mod_1`` (outermost mod on the way back) -Each mod has a chance to inspect and modify the incoming ``Message`` before passing it to the next mod, and likewise with the outgoing ``Message`` before returning it up the stack. +Each mod has a chance to inspect and modify the incoming ``Message`` before passing it +to the next mod, and likewise with the outgoing ``Message`` before returning it up the +stack. Conclusion ---------- -By following this guide, you have learned how to effectively use mods to enhance your ``ClientApp``'s functionality. Remember that the order of mods is crucial and affects how the input and output are processed. +By following this guide, you have learned how to effectively use mods to enhance your +``ClientApp``'s functionality. Remember that the order of mods is crucial and affects +how the input and output are processed. 
Enjoy building a more robust and flexible ``ClientApp`` with mods! diff --git a/doc/source/how-to-use-differential-privacy.rst b/doc/source/how-to-use-differential-privacy.rst index 5d4fa3dca1a4..67e54271bb2e 100644 --- a/doc/source/how-to-use-differential-privacy.rst +++ b/doc/source/how-to-use-differential-privacy.rst @@ -1,126 +1,151 @@ Use Differential Privacy ------------------------- -This guide explains how you can utilize differential privacy in the Flower framework. If you are not yet familiar with differential privacy, you can refer to :doc:`explanation-differential-privacy`. +======================== -.. warning:: +This guide explains how you can utilize differential privacy in the Flower framework. If +you are not yet familiar with differential privacy, you can refer to +:doc:`explanation-differential-privacy`. - Differential Privacy in Flower is in a preview phase. If you plan to use these features in a production environment with sensitive data, feel free contact us to discuss your requirements and to receive guidance on how to best use these features. +.. warning:: + Differential Privacy in Flower is in a preview phase. If you plan to use these + features in a production environment with sensitive data, feel free to contact us to + discuss your requirements and to receive guidance on how to best use these features. Central Differential Privacy -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -This approach consists of two separate phases: clipping of the updates and adding noise to the aggregated model. -For the clipping phase, Flower framework has made it possible to decide whether to perform clipping on the server side or the client side. - -- **Server-side Clipping**: This approach has the advantage of the server enforcing uniform clipping across all clients' updates and reducing the communication overhead for clipping values. 
However, it also has the disadvantage of increasing the computational load on the server due to the need to perform the clipping operation for all clients. -- **Client-side Clipping**: This approach has the advantage of reducing the computational overhead on the server. However, it also has the disadvantage of lacking centralized control, as the server has less control over the clipping process. +---------------------------- +This approach consists of two separate phases: clipping of the updates and adding noise +to the aggregated model. For the clipping phase, Flower framework has made it possible +to decide whether to perform clipping on the server side or the client side. +- **Server-side Clipping**: This approach has the advantage of the server enforcing + uniform clipping across all clients' updates and reducing the communication overhead + for clipping values. However, it also has the disadvantage of increasing the + computational load on the server due to the need to perform the clipping operation for + all clients. +- **Client-side Clipping**: This approach has the advantage of reducing the + computational overhead on the server. However, it also has the disadvantage of lacking + centralized control, as the server has less control over the clipping process. Server-side Clipping -^^^^^^^^^^^^^^^^^^^^ -For central DP with server-side clipping, there are two :code:`Strategy` classes that act as wrappers around the actual :code:`Strategy` instance (for example, :code:`FedAvg`). -The two wrapper classes are :code:`DifferentialPrivacyServerSideFixedClipping` and :code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and adaptive clipping. +~~~~~~~~~~~~~~~~~~~~ -.. image:: ./_static/DP/serversideCDP.png - :align: center - :width: 700 - :alt: server side clipping +For central DP with server-side clipping, there are two ``Strategy`` classes that act as +wrappers around the actual ``Strategy`` instance (for example, ``FedAvg``). 
The two +wrapper classes are ``DifferentialPrivacyServerSideFixedClipping`` and +``DifferentialPrivacyServerSideAdaptiveClipping`` for fixed and adaptive clipping. +.. image:: ./_static/DP/serversideCDP.png + :align: center + :width: 700 + :alt: server side clipping -The code sample below enables the :code:`FedAvg` strategy to use server-side fixed clipping using the :code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. -The same approach can be used with :code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the corresponding input parameters. +The code sample below enables the ``FedAvg`` strategy to use server-side fixed clipping +using the ``DifferentialPrivacyServerSideFixedClipping`` wrapper class. The same +approach can be used with ``DifferentialPrivacyServerSideAdaptiveClipping`` by adjusting +the corresponding input parameters. .. code-block:: python - from flwr.server.strategy import DifferentialPrivacyClientSideFixedClipping - - # Create the strategy - strategy = fl.server.strategy.FedAvg(...) - - # Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping wrapper - dp_strategy = DifferentialPrivacyServerSideFixedClipping( - strategy, - cfg.noise_multiplier, - cfg.clipping_norm, - cfg.num_sampled_clients, - ) + from flwr.server.strategy import DifferentialPrivacyClientSideFixedClipping + # Create the strategy + strategy = fl.server.strategy.FedAvg(...) + # Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping wrapper + dp_strategy = DifferentialPrivacyServerSideFixedClipping( + strategy, + cfg.noise_multiplier, + cfg.clipping_norm, + cfg.num_sampled_clients, + ) Client-side Clipping -^^^^^^^^^^^^^^^^^^^^ -For central DP with client-side clipping, the server sends the clipping value to selected clients on each round. -Clients can use existing Flower :code:`Mods` to perform the clipping. 
-Two mods are available for fixed and adaptive client-side clipping: :code:`fixedclipping_mod` and :code:`adaptiveclipping_mod` with corresponding server-side wrappers :code:`DifferentialPrivacyClientSideFixedClipping` and :code:`DifferentialPrivacyClientSideAdaptiveClipping`. +~~~~~~~~~~~~~~~~~~~~ -.. image:: ./_static/DP/clientsideCDP.png - :align: center - :width: 800 - :alt: client side clipping +For central DP with client-side clipping, the server sends the clipping value to +selected clients on each round. Clients can use existing Flower ``Mods`` to perform the +clipping. Two mods are available for fixed and adaptive client-side clipping: +``fixedclipping_mod`` and ``adaptiveclipping_mod`` with corresponding server-side +wrappers ``DifferentialPrivacyClientSideFixedClipping`` and +``DifferentialPrivacyClientSideAdaptiveClipping``. +.. image:: ./_static/DP/clientsideCDP.png + :align: center + :width: 800 + :alt: client side clipping -The code sample below enables the :code:`FedAvg` strategy to use differential privacy with client-side fixed clipping using both the :code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on the client, :code:`fixedclipping_mod`: +The code sample below enables the ``FedAvg`` strategy to use differential privacy with +client-side fixed clipping using both the ``DifferentialPrivacyClientSideFixedClipping`` +wrapper class and, on the client, ``fixedclipping_mod``: .. code-block:: python - from flwr.server.strategy import DifferentialPrivacyClientSideFixedClipping + from flwr.server.strategy import DifferentialPrivacyClientSideFixedClipping - # Create the strategy - strategy = fl.server.strategy.FedAvg(...) + # Create the strategy + strategy = fl.server.strategy.FedAvg(...) 
- # Wrap the strategy with the DifferentialPrivacyClientSideFixedClipping wrapper - dp_strategy = DifferentialPrivacyClientSideFixedClipping( - strategy, - cfg.noise_multiplier, - cfg.clipping_norm, - cfg.num_sampled_clients, - ) + # Wrap the strategy with the DifferentialPrivacyClientSideFixedClipping wrapper + dp_strategy = DifferentialPrivacyClientSideFixedClipping( + strategy, + cfg.noise_multiplier, + cfg.clipping_norm, + cfg.num_sampled_clients, + ) -In addition to the server-side strategy wrapper, the :code:`ClientApp` needs to configure the matching :code:`fixedclipping_mod` to perform the client-side clipping: +In addition to the server-side strategy wrapper, the ``ClientApp`` needs to configure +the matching ``fixedclipping_mod`` to perform the client-side clipping: .. code-block:: python - from flwr.client.mod import fixedclipping_mod - - # Add fixedclipping_mod to the client-side mods - app = fl.client.ClientApp( - client_fn=client_fn, - mods=[ - fixedclipping_mod, - ] - ) + from flwr.client.mod import fixedclipping_mod + # Add fixedclipping_mod to the client-side mods + app = fl.client.ClientApp( + client_fn=client_fn, + mods=[ + fixedclipping_mod, + ], + ) Local Differential Privacy -~~~~~~~~~~~~~~~~~~~~~~~~~~ -To utilize local differential privacy (DP) and add noise to the client model parameters before transmitting them to the server in Flower, you can use the `LocalDpMod`. The following hyperparameters need to be set: clipping norm value, sensitivity, epsilon, and delta. +-------------------------- + +To utilize local differential privacy (DP) and add noise to the client model parameters +before transmitting them to the server in Flower, you can use the `LocalDpMod`. The +following hyperparameters need to be set: clipping norm value, sensitivity, epsilon, and +delta. .. 
image:: ./_static/DP/localdp.png - :align: center - :width: 700 - :alt: local DP mod + :align: center + :width: 700 + :alt: local DP mod -Below is a code example that shows how to use :code:`LocalDpMod`: +Below is a code example that shows how to use ``LocalDpMod``: .. code-block:: python - from flwr.client.mod.localdp_mod import LocalDpMod - - # Create an instance of the mod with the required params - local_dp_obj = LocalDpMod( - cfg.clipping_norm, cfg.sensitivity, cfg.epsilon, cfg.delta - ) - # Add local_dp_obj to the client-side mods + from flwr.client.mod.localdp_mod import LocalDpMod - app = fl.client.ClientApp( - client_fn=client_fn, - mods=[local_dp_obj], - ) + # Create an instance of the mod with the required params + local_dp_obj = LocalDpMod(cfg.clipping_norm, cfg.sensitivity, cfg.epsilon, cfg.delta) + # Add local_dp_obj to the client-side mods + app = fl.client.ClientApp( + client_fn=client_fn, + mods=[local_dp_obj], + ) -Please note that the order of mods, especially those that modify parameters, is important when using multiple modifiers. Typically, differential privacy (DP) modifiers should be the last to operate on parameters. +Please note that the order of mods, especially those that modify parameters, is +important when using multiple modifiers. Typically, differential privacy (DP) modifiers +should be the last to operate on parameters. Local Training using Privacy Engines -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -For ensuring data instance-level privacy during local model training on the client side, consider leveraging privacy engines such as Opacus and TensorFlow Privacy. For examples of using Flower with these engines, please refer to the Flower examples directory (`Opacus `_, `Tensorflow Privacy `_). \ No newline at end of file +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +For ensuring data instance-level privacy during local model training on the client side, +consider leveraging privacy engines such as Opacus and TensorFlow Privacy. 
For examples +of using Flower with these engines, please refer to the Flower examples directory +(`Opacus `_, `Tensorflow +Privacy `_). diff --git a/doc/source/how-to-use-strategies.rst b/doc/source/how-to-use-strategies.rst index 8ac120124951..b4803c6059b3 100644 --- a/doc/source/how-to-use-strategies.rst +++ b/doc/source/how-to-use-strategies.rst @@ -1,19 +1,21 @@ Use strategies ============== -Flower allows full customization of the learning process through the :code:`Strategy` abstraction. A number of built-in strategies are provided in the core framework. +Flower allows full customization of the learning process through the ``Strategy`` +abstraction. A number of built-in strategies are provided in the core framework. -There are three ways to customize the way Flower orchestrates the learning process on the server side: - -* Use an existing strategy, for example, :code:`FedAvg` -* Customize an existing strategy with callback functions -* Implement a novel strategy +There are three ways to customize the way Flower orchestrates the learning process on +the server side: +- Use an existing strategy, for example, ``FedAvg`` +- Customize an existing strategy with callback functions +- Implement a novel strategy Use an existing strategy ------------------------ -Flower comes with a number of popular federated learning strategies built-in. A built-in strategy can be instantiated as follows: +Flower comes with a number of popular federated learning strategies built-in. A built-in +strategy can be instantiated as follows: .. code-block:: python @@ -22,7 +24,9 @@ Flower comes with a number of popular federated learning strategies built-in. A strategy = fl.server.strategy.FedAvg() fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3), strategy=strategy) -This creates a strategy with all parameters left at their default values and passes it to the :code:`start_server` function. 
It is usually recommended to adjust a few parameters during instantiation: +This creates a strategy with all parameters left at their default values and passes it +to the ``start_server`` function. It is usually recommended to adjust a few parameters +during instantiation: .. code-block:: python @@ -35,22 +39,26 @@ This creates a strategy with all parameters left at their default values and pas ) fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3), strategy=strategy) - Customize an existing strategy with callback functions ------------------------------------------------------ -Existing strategies provide several ways to customize their behaviour. Callback functions allow strategies to call user-provided code during execution. +Existing strategies provide several ways to customize their behaviour. Callback +functions allow strategies to call user-provided code during execution. Configuring client fit and client evaluate ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The server can pass new configuration values to the client each round by providing a function to :code:`on_fit_config_fn`. The provided function will be called by the strategy and must return a dictionary of configuration key values pairs that will be sent to the client. -It must return a dictionary of arbitrary configuration values :code:`client.fit` and :code:`client.evaluate` functions during each round of federated learning. +The server can pass new configuration values to the client each round by providing a +function to ``on_fit_config_fn``. The provided function will be called by the strategy +and must return a dictionary of configuration key values pairs that will be sent to the +client. It must return a dictionary of arbitrary configuration values ``client.fit`` and +``client.evaluate`` functions during each round of federated learning. .. 
code-block:: python import flwr as fl + def get_on_fit_config_fn() -> Callable[[int], Dict[str, str]]: """Return a function which returns training configurations.""" @@ -64,6 +72,7 @@ It must return a dictionary of arbitrary configuration values :code:`client.fit return fit_config + strategy = fl.server.strategy.FedAvg( fraction_fit=0.1, min_fit_clients=10, @@ -72,18 +81,23 @@ It must return a dictionary of arbitrary configuration values :code:`client.fit ) fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3), strategy=strategy) -The :code:`on_fit_config_fn` can be used to pass arbitrary configuration values from server to client, and potentially change these values each round, for example, to adjust the learning rate. -The client will receive the dictionary returned by the :code:`on_fit_config_fn` in its own :code:`client.fit()` function. +The ``on_fit_config_fn`` can be used to pass arbitrary configuration values from server +to client, and potentially change these values each round, for example, to adjust the +learning rate. The client will receive the dictionary returned by the +``on_fit_config_fn`` in its own ``client.fit()`` function. -Similar to :code:`on_fit_config_fn`, there is also :code:`on_evaluate_config_fn` to customize the configuration sent to :code:`client.evaluate()` +Similar to ``on_fit_config_fn``, there is also ``on_evaluate_config_fn`` to customize +the configuration sent to ``client.evaluate()`` Configuring server-side evaluation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Server-side evaluation can be enabled by passing an evaluation function to :code:`evaluate_fn`. - +Server-side evaluation can be enabled by passing an evaluation function to +``evaluate_fn``. Implement a novel strategy -------------------------- -Writing a fully custom strategy is a bit more involved, but it provides the most flexibility. Read the `Implementing Strategies `_ guide to learn more. 
+Writing a fully custom strategy is a bit more involved, but it provides the most +flexibility. Read the `Implementing Strategies `_ +guide to learn more. diff --git a/doc/source/index.rst b/doc/source/index.rst index fe996db62ffb..197599d595a8 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -2,15 +2,16 @@ Flower Framework Documentation ============================== .. meta:: - :description: Check out the documentation of the main Flower Framework enabling easy Python development for Federated Learning. - -Welcome to Flower's documentation. `Flower `_ is a friendly federated learning framework. + :description: Check out the documentation of the main Flower Framework enabling easy Python development for Federated Learning. +Welcome to Flower's documentation. `Flower `_ is a friendly federated +learning framework. Join the Flower Community ------------------------- -The Flower Community is growing quickly - we're a friendly group of researchers, engineers, students, professionals, academics, and other enthusiasts. +The Flower Community is growing quickly - we're a friendly group of researchers, +engineers, students, professionals, academics, and other enthusiasts. .. button-link:: https://flower.ai/join-slack :color: primary @@ -18,13 +19,12 @@ The Flower Community is growing quickly - we're a friendly group of researchers, Join us on Slack - Flower Framework ---------------- -The user guide is targeted at researchers and developers who want to use Flower -to bring existing machine learning workloads into a federated setting. One of -Flower's design goals was to make this simple. Read on to learn more. +The user guide is targeted at researchers and developers who want to use Flower to bring +existing machine learning workloads into a federated setting. One of Flower's design +goals was to make this simple. Read on to learn more. 
Tutorials ~~~~~~~~~ @@ -32,44 +32,50 @@ Tutorials A learning-oriented series of federated learning tutorials, the best place to start. .. toctree:: - :maxdepth: 1 - :caption: Tutorial + :maxdepth: 1 + :caption: Tutorial - tutorial-series-what-is-federated-learning - tutorial-series-get-started-with-flower-pytorch - tutorial-series-use-a-federated-learning-strategy-pytorch - tutorial-series-build-a-strategy-from-scratch-pytorch - tutorial-series-customize-the-client-pytorch + tutorial-series-what-is-federated-learning + tutorial-series-get-started-with-flower-pytorch + tutorial-series-use-a-federated-learning-strategy-pytorch + tutorial-series-build-a-strategy-from-scratch-pytorch + tutorial-series-customize-the-client-pytorch .. toctree:: - :maxdepth: 1 - :caption: Quickstart tutorials - :hidden: - - tutorial-quickstart-pytorch - tutorial-quickstart-tensorflow - tutorial-quickstart-mlx - tutorial-quickstart-huggingface - tutorial-quickstart-jax - tutorial-quickstart-pandas - tutorial-quickstart-fastai - tutorial-quickstart-pytorch-lightning - tutorial-quickstart-scikitlearn - tutorial-quickstart-xgboost - tutorial-quickstart-android - tutorial-quickstart-ios - -QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow ` | :doc:`MLX ` | :doc:`🤗 Transformers ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS ` + :maxdepth: 1 + :caption: Quickstart tutorials + :hidden: + + tutorial-quickstart-pytorch + tutorial-quickstart-tensorflow + tutorial-quickstart-mlx + tutorial-quickstart-huggingface + tutorial-quickstart-jax + tutorial-quickstart-pandas + tutorial-quickstart-fastai + tutorial-quickstart-pytorch-lightning + tutorial-quickstart-scikitlearn + tutorial-quickstart-xgboost + tutorial-quickstart-android + tutorial-quickstart-ios + +QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow +` | :doc:`MLX ` | :doc:`🤗 +Transformers ` | :doc:`JAX ` | +:doc:`Pandas ` | 
:doc:`fastai ` +| :doc:`PyTorch Lightning ` | :doc:`scikit-learn +` | :doc:`XGBoost ` | +:doc:`Android ` | :doc:`iOS ` We also made video tutorials for PyTorch: -.. youtube:: jOmmuzMIQ4c - :width: 80% +.. youtube:: jOmmuzMIQ4c + :width: 80% And TensorFlow: -.. youtube:: FGTc2TQq7VM - :width: 80% +.. youtube:: FGTc2TQq7VM + :width: 80% How-to guides ~~~~~~~~~~~~~ @@ -77,45 +83,46 @@ How-to guides Problem-oriented how-to guides show step-by-step how to achieve a specific goal. .. toctree:: - :maxdepth: 1 - :caption: How-to guides - - how-to-install-flower - how-to-configure-clients - how-to-use-strategies - how-to-implement-strategies - how-to-aggregate-evaluation-results - how-to-save-and-load-model-checkpoints - how-to-run-simulations - how-to-monitor-simulation - how-to-configure-logging - how-to-enable-ssl-connections - how-to-use-built-in-mods - how-to-use-differential-privacy - how-to-authenticate-supernodes - docker/index - how-to-upgrade-to-flower-1.0 - how-to-upgrade-to-flower-next + :maxdepth: 1 + :caption: How-to guides + + how-to-install-flower + how-to-configure-clients + how-to-use-strategies + how-to-implement-strategies + how-to-aggregate-evaluation-results + how-to-save-and-load-model-checkpoints + how-to-run-simulations + how-to-monitor-simulation + how-to-configure-logging + how-to-enable-ssl-connections + how-to-use-built-in-mods + how-to-use-differential-privacy + how-to-authenticate-supernodes + docker/index + how-to-upgrade-to-flower-1.0 + how-to-upgrade-to-flower-next .. toctree:: - :maxdepth: 1 - :caption: Legacy example guides + :maxdepth: 1 + :caption: Legacy example guides - example-pytorch-from-centralized-to-federated - example-fedbn-pytorch-from-centralized-to-federated + example-pytorch-from-centralized-to-federated + example-fedbn-pytorch-from-centralized-to-federated Explanations ~~~~~~~~~~~~ -Understanding-oriented concept guides explain and discuss key topics and underlying ideas behind Flower and collaborative AI. 
+Understanding-oriented concept guides explain and discuss key topics and underlying +ideas behind Flower and collaborative AI. .. toctree:: - :maxdepth: 1 - :caption: Explanations + :maxdepth: 1 + :caption: Explanations - explanation-federated-evaluation - explanation-differential-privacy - explanation-flower-architecture + explanation-federated-evaluation + explanation-differential-privacy + explanation-flower-architecture References ~~~~~~~~~~ @@ -123,71 +130,77 @@ References Information-oriented API reference and other reference material. .. autosummary:: - :toctree: ref-api - :template: autosummary/module.rst - :caption: API reference - :recursive: + :toctree: ref-api + :template: autosummary/module.rst + :caption: API reference + :recursive: - flwr + flwr .. toctree:: - :maxdepth: 2 + :maxdepth: 2 - ref-api-cli + ref-api-cli .. toctree:: - :maxdepth: 1 - :caption: Reference docs - - ref-example-projects - ref-telemetry - ref-changelog - ref-faq + :maxdepth: 1 + :caption: Reference docs + ref-example-projects + ref-telemetry + ref-changelog + ref-faq Contributor docs ---------------- -The Flower community welcomes contributions. The following docs are intended to help along the way. - +The Flower community welcomes contributions. The following docs are intended to help +along the way. .. toctree:: - :maxdepth: 1 - :caption: Contributor tutorials + :maxdepth: 1 + :caption: Contributor tutorials - contributor-tutorial-contribute-on-github - contributor-tutorial-get-started-as-a-contributor + contributor-tutorial-contribute-on-github + contributor-tutorial-get-started-as-a-contributor .. 
toctree:: - :maxdepth: 1 - :caption: Contributor how-to guides + :maxdepth: 1 + :caption: Contributor how-to guides - contributor-how-to-install-development-versions - contributor-how-to-set-up-a-virtual-env - contributor-how-to-develop-in-vscode-dev-containers - contributor-how-to-write-documentation - contributor-how-to-release-flower - contributor-how-to-contribute-translations - contributor-how-to-build-docker-images + contributor-how-to-install-development-versions + contributor-how-to-set-up-a-virtual-env + contributor-how-to-develop-in-vscode-dev-containers + contributor-how-to-write-documentation + contributor-how-to-release-flower + contributor-how-to-contribute-translations + contributor-how-to-build-docker-images .. toctree:: - :maxdepth: 1 - :caption: Contributor explanations + :maxdepth: 1 + :caption: Contributor explanations - contributor-explanation-public-and-private-apis + contributor-explanation-public-and-private-apis .. toctree:: - :maxdepth: 1 - :caption: Contributor references + :maxdepth: 1 + :caption: Contributor references + + fed/index + contributor-ref-good-first-contributions + contributor-ref-secure-aggregation-protocols + +.. + Indices and tables - fed/index - contributor-ref-good-first-contributions - contributor-ref-secure-aggregation-protocols +.. + ------------------ +.. + * :ref:`genindex` -.. Indices and tables -.. ------------------ +.. + * :ref:`modindex` -.. * :ref:`genindex` -.. * :ref:`modindex` -.. * :ref:`search` +.. + * :ref:`search` diff --git a/doc/source/ref-api-cli.rst b/doc/source/ref-api-cli.rst index 95664b2f490a..e95132bbadba 100644 --- a/doc/source/ref-api-cli.rst +++ b/doc/source/ref-api-cli.rst @@ -4,64 +4,66 @@ Flower CLI reference .. _flwr-apiref: flwr CLI -~~~~~~~~ +-------- .. click:: flwr.cli.app:typer_click_object - :prog: flwr - :nested: full + :prog: flwr + :nested: full .. _flower-simulation-apiref: flower-simulation -~~~~~~~~~~~~~~~~~ +----------------- .. 
argparse:: - :module: flwr.simulation.run_simulation - :func: _parse_args_run_simulation - :prog: flower-simulation + :module: flwr.simulation.run_simulation + :func: _parse_args_run_simulation + :prog: flower-simulation .. _flower-superlink-apiref: flower-superlink -~~~~~~~~~~~~~~~~ +---------------- .. argparse:: - :module: flwr.server.app - :func: _parse_args_run_superlink - :prog: flower-superlink + :module: flwr.server.app + :func: _parse_args_run_superlink + :prog: flower-superlink .. _flower-supernode-apiref: flower-supernode -~~~~~~~~~~~~~~~~~ +---------------- .. argparse:: - :module: flwr.client.supernode.app - :func: _parse_args_run_supernode - :prog: flower-supernode + :module: flwr.client.supernode.app + :func: _parse_args_run_supernode + :prog: flower-supernode .. _flower-server-app-apiref: flower-server-app -~~~~~~~~~~~~~~~~~ +----------------- .. note:: - Note that since version :code:`1.11.0`, :code:`flower-server-app` no longer supports passing a reference to a `ServerApp` attribute. - Instead, you need to pass the path to Flower app via the argument :code:`--app`. - This is the path to a directory containing a `pyproject.toml`. - You can create a valid Flower app by executing :code:`flwr new` and following the prompt. + + Note that since version ``1.11.0``, ``flower-server-app`` no longer supports passing + a reference to a `ServerApp` attribute. Instead, you need to pass the path to Flower + app via the argument ``--app``. This is the path to a directory containing a + `pyproject.toml`. You can create a valid Flower app by executing ``flwr new`` and + following the prompt. .. argparse:: - :module: flwr.server.run_serverapp - :func: _parse_args_run_server_app - :prog: flower-server-app + :module: flwr.server.run_serverapp + :func: _parse_args_run_server_app + :prog: flower-server-app .. _flower-superexec-apiref: flower-superexec -~~~~~~~~~~~~~~~~~ +---------------- .. 
argparse:: - :module: flwr.superexec.app - :func: _parse_args_run_superexec - :prog: flower-superexec \ No newline at end of file + :module: flwr.superexec.app + :func: _parse_args_run_superexec + :prog: flower-superexec diff --git a/doc/source/ref-example-projects.rst b/doc/source/ref-example-projects.rst index 597e3a596c51..4f0a3014e1d4 100644 --- a/doc/source/ref-example-projects.rst +++ b/doc/source/ref-example-projects.rst @@ -1,48 +1,52 @@ Example projects ================ -Flower comes with a number of usage examples. The examples demonstrate how -Flower can be used to federate different kinds of existing machine learning -pipelines, usually leveraging popular machine learning frameworks such as -`PyTorch `_ or -`TensorFlow `_. +Flower comes with a number of usage examples. The examples demonstrate how Flower can be +used to federate different kinds of existing machine learning pipelines, usually +leveraging popular machine learning frameworks such as `PyTorch `_ +or `TensorFlow `_. The following examples are available as standalone projects. 
+ Quickstart TensorFlow/Keras --------------------------- -The TensorFlow/Keras quickstart example shows CIFAR-10 image classification -with MobileNetV2: +The TensorFlow/Keras quickstart example shows CIFAR-10 image classification with +MobileNetV2: -- `Quickstart TensorFlow (Code) `_ +- `Quickstart TensorFlow (Code) + `_ - :doc:`Quickstart TensorFlow (Tutorial) ` -- `Quickstart TensorFlow (Blog Post) `_ - +- `Quickstart TensorFlow (Blog Post) + `_ Quickstart PyTorch ------------------ -The PyTorch quickstart example shows CIFAR-10 image classification -with a simple Convolutional Neural Network: +The PyTorch quickstart example shows CIFAR-10 image classification with a simple +Convolutional Neural Network: -- `Quickstart PyTorch (Code) `_ +- `Quickstart PyTorch (Code) + `_ - :doc:`Quickstart PyTorch (Tutorial) ` - PyTorch: From Centralized To Federated -------------------------------------- This example shows how a regular PyTorch project can be federated using Flower: -- `PyTorch: From Centralized To Federated (Code) `_ -- :doc:`PyTorch: From Centralized To Federated (Tutorial) ` - +- `PyTorch: From Centralized To Federated (Code) + `_ +- :doc:`PyTorch: From Centralized To Federated (Tutorial) + ` Federated Learning on Raspberry Pi and Nvidia Jetson ---------------------------------------------------- -This example shows how Flower can be used to build a federated learning system that run across Raspberry Pi and Nvidia Jetson: - -- `Federated Learning on Raspberry Pi and Nvidia Jetson (Code) `_ -- `Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) `_ +This example shows how Flower can be used to build a federated learning system that run +across Raspberry Pi and Nvidia Jetson: +- `Federated Learning on Raspberry Pi and Nvidia Jetson (Code) + `_ +- `Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) + `_ diff --git a/doc/source/ref-faq.rst b/doc/source/ref-faq.rst index e3bd754d481c..0bd004f81858 100644 --- a/doc/source/ref-faq.rst +++ 
b/doc/source/ref-faq.rst @@ -1,7 +1,8 @@ FAQ === -This page collects answers to commonly asked questions about Federated Learning with Flower. +This page collects answers to commonly asked questions about Federated Learning with +Flower. .. dropdown:: :fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab? diff --git a/doc/source/tutorial-quickstart-android.rst b/doc/source/tutorial-quickstart-android.rst index 9177236d5a7c..f2691203078c 100644 --- a/doc/source/tutorial-quickstart-android.rst +++ b/doc/source/tutorial-quickstart-android.rst @@ -1,12 +1,12 @@ .. _quickstart-android: - Quickstart Android ================== .. meta:: - :description: Read this Federated Learning quickstart tutorial for creating an Android app using Flower. + :description: Read this Federated Learning quickstart tutorial for creating an Android app using Flower. Let's build a federated learning system using TFLite and Flower on Android! -Please refer to the `full code example `_ to learn more. +Please refer to the `full code example +`_ to learn more. diff --git a/doc/source/tutorial-quickstart-fastai.rst b/doc/source/tutorial-quickstart-fastai.rst index e42328e6f712..d52c570b0195 100644 --- a/doc/source/tutorial-quickstart-fastai.rst +++ b/doc/source/tutorial-quickstart-fastai.rst @@ -1,113 +1,110 @@ .. _quickstart-fastai: -################### - Quickstart fastai -################### +Quickstart fastai +================= -In this federated learning tutorial we will learn how to train a -SqueezeNet model on MNIST using Flower and fastai. It is recommended to -create a virtual environment and run everything within a -:doc:`virtualenv `. +In this federated learning tutorial we will learn how to train a SqueezeNet model on +MNIST using Flower and fastai. It is recommended to create a virtual environment and run +everything within a :doc:`virtualenv `. Then, clone the code example directly from GitHub: -.. code:: shell +.. 
code-block:: shell - git clone --depth=1 https://github.com/adap/flower.git _tmp \ - && mv _tmp/examples/quickstart-fastai . \ - && rm -rf _tmp && cd quickstart-fastai + git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/quickstart-fastai . \ + && rm -rf _tmp && cd quickstart-fastai -This will create a new directory called `quickstart-fastai` containing -the following files: +This will create a new directory called `quickstart-fastai` containing the following +files: -.. code:: shell +.. code-block:: shell - quickstart-fastai - ├── fastai_example - │ ├── client_app.py # Defines your ClientApp - │ ├── server_app.py # Defines your ServerApp - │ └── task.py # Defines your model, training and data loading - ├── pyproject.toml # Project metadata like dependencies and configs - └── README.md + quickstart-fastai + ├── fastai_example + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md Next, activate your environment, then run: -.. code:: shell +.. code-block:: shell - # Navigate to the example directory - $ cd path/to/quickstart-fastai + # Navigate to the example directory + $ cd path/to/quickstart-fastai - # Install project and dependencies - $ pip install -e . + # Install project and dependencies + $ pip install -e . -This example by default runs the Flower Simulation Engine, creating a -federation of 10 nodes using `FedAvg +This example by default runs the Flower Simulation Engine, creating a federation of 10 +nodes using `FedAvg `_ -as the aggregation strategy. The dataset will be partitioned using -Flower Dataset's `IidPartitioner +as the aggregation strategy. The dataset will be partitioned using Flower Dataset's +`IidPartitioner `_. Let's run the project: -.. code:: shell +.. code-block:: shell - # Run with default arguments - $ flwr run . 
+ # Run with default arguments + $ flwr run . With default arguments you will see an output like this one: -.. code:: shell - - Loading project configuration... - Success - INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout - INFO : - INFO : [INIT] - INFO : Using initial global parameters provided by strategy - INFO : Starting evaluation of initial global parameters - INFO : Evaluation returned no results (`None`) - INFO : - INFO : [ROUND 1] - INFO : configure_fit: strategy sampled 5 clients (out of 10) - INFO : aggregate_fit: received 5 results and 0 failures - WARNING : No fit_metrics_aggregation_fn provided - INFO : configure_evaluate: strategy sampled 5 clients (out of 10) - INFO : aggregate_evaluate: received 5 results and 0 failures - INFO : - INFO : [ROUND 2] - INFO : configure_fit: strategy sampled 5 clients (out of 10) - INFO : aggregate_fit: received 5 results and 0 failures - INFO : configure_evaluate: strategy sampled 5 clients (out of 10) - INFO : aggregate_evaluate: received 5 results and 0 failures - INFO : - INFO : [ROUND 3] - INFO : configure_fit: strategy sampled 5 clients (out of 10) - INFO : aggregate_fit: received 5 results and 0 failures - INFO : configure_evaluate: strategy sampled 5 clients (out of 10) - INFO : aggregate_evaluate: received 5 results and 0 failures - INFO : - INFO : [SUMMARY] - INFO : Run finished 3 round(s) in 143.02s - INFO : History (loss, distributed): - INFO : round 1: 2.699497365951538 - INFO : round 2: 0.9549586296081543 - INFO : round 3: 0.6627192616462707 - INFO : History (metrics, distributed, evaluate): - INFO : {'accuracy': [(1, 0.09766666889190674), - INFO : (2, 0.6948333323001862), - INFO : (3, 0.7721666693687439)]} - INFO : - -You can also override the parameters defined in the -``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this: - -.. code:: shell - - # Override some arguments - $ flwr run . --run-config num-server-rounds=5 +.. 
code-block:: shell + + Loading project configuration... + Success + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Using initial global parameters provided by strategy + INFO : Starting evaluation of initial global parameters + INFO : Evaluation returned no results (`None`) + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + WARNING : No fit_metrics_aggregation_fn provided + INFO : configure_evaluate: strategy sampled 5 clients (out of 10) + INFO : aggregate_evaluate: received 5 results and 0 failures + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + INFO : configure_evaluate: strategy sampled 5 clients (out of 10) + INFO : aggregate_evaluate: received 5 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + INFO : configure_evaluate: strategy sampled 5 clients (out of 10) + INFO : aggregate_evaluate: received 5 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 143.02s + INFO : History (loss, distributed): + INFO : round 1: 2.699497365951538 + INFO : round 2: 0.9549586296081543 + INFO : round 3: 0.6627192616462707 + INFO : History (metrics, distributed, evaluate): + INFO : {'accuracy': [(1, 0.09766666889190674), + INFO : (2, 0.6948333323001862), + INFO : (3, 0.7721666693687439)]} + INFO : + +You can also override the parameters defined in the ``[tool.flwr.app.config]`` section +in ``pyproject.toml`` like this: + +.. code-block:: shell + + # Override some arguments + $ flwr run . --run-config num-server-rounds=5 .. note:: - Check the `source code - `_ - of this tutorial in ``examples/quickstart-fasai`` in the Flower - GitHub repository. 
+ Check the `source code + `_ of this + tutorial in ``examples/quickstart-fasai`` in the Flower GitHub repository. diff --git a/doc/source/tutorial-quickstart-huggingface.rst b/doc/source/tutorial-quickstart-huggingface.rst index e5caa3b19dd6..3c9d3981e587 100644 --- a/doc/source/tutorial-quickstart-huggingface.rst +++ b/doc/source/tutorial-quickstart-huggingface.rst @@ -1,419 +1,385 @@ .. _quickstart-huggingface: -########################### - Quickstart 🤗 Transformers -########################### +Quickstart 🤗 Transformers +========================== -In this federated learning tutorial we will learn how to train a large -language model (LLM) on the `IMDB -`_ dataset using -Flower and the 🤗 Hugging Face Transformers library. It is recommended to -create a virtual environment and run everything within a -:doc:`virtualenv `. +In this federated learning tutorial we will learn how to train a large language model +(LLM) on the `IMDB `_ dataset using +Flower and the 🤗 Hugging Face Transformers library. It is recommended to create a +virtual environment and run everything within a :doc:`virtualenv +`. -Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face -project. It will generate all the files needed to run, by default with -the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ -The dataset will be partitioned using |flowerdatasets|_'s -|iidpartitioner|_. +Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face project. It will +generate all the files needed to run, by default with the Flower Simulation Engine, a +federation of 10 nodes using |fedavg|_ The dataset will be partitioned using +|flowerdatasets|_'s |iidpartitioner|_. -Now that we have a rough idea of what this example is about, let's get -started. First, install Flower in your new environment: +Now that we have a rough idea of what this example is about, let's get started. First, +install Flower in your new environment: -.. code:: shell +.. 
code-block:: shell - # In a new Python environment - $ pip install flwr + # In a new Python environment + $ pip install flwr -Then, run the command below. You will be prompted to select one of the -available templates (choose ``HuggingFace``), give a name to your -project, and type in your developer name: +Then, run the command below. You will be prompted to select one of the available +templates (choose ``HuggingFace``), give a name to your project, and type in your +developer name: -.. code:: shell +.. code-block:: shell - $ flwr new + $ flwr new -After running it you'll notice a new directory with your project name -has been created. It should have the following structure: +After running it you'll notice a new directory with your project name has been created. +It should have the following structure: -.. code:: shell +.. code-block:: shell - - ├── - │ ├── __init__.py - │ ├── client_app.py # Defines your ClientApp - │ ├── server_app.py # Defines your ServerApp - │ └── task.py # Defines your model, training and data loading - ├── pyproject.toml # Project metadata like dependencies and configs - └── README.md + + ├── + │ ├── __init__.py + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md -If you haven't yet installed the project and its dependencies, you can -do so by: +If you haven't yet installed the project and its dependencies, you can do so by: -.. code:: shell +.. code-block:: shell - # From the directory where your pyproject.toml is - $ pip install -e . + # From the directory where your pyproject.toml is + $ pip install -e . To run the project, do: -.. code:: shell +.. code-block:: shell - # Run with default arguments - $ flwr run . + # Run with default arguments + $ flwr run . With default arguments you will see an output like this one: -.. 
code:: shell - - Loading project configuration... - Success - INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout - INFO : - INFO : [INIT] - INFO : Using initial global parameters provided by strategy - INFO : Starting evaluation of initial global parameters - INFO : Evaluation returned no results (`None`) - INFO : - INFO : [ROUND 1] - INFO : configure_fit: strategy sampled 2 clients (out of 10) - INFO : aggregate_fit: received 2 results and 0 failures - WARNING : No fit_metrics_aggregation_fn provided - INFO : configure_evaluate: strategy sampled 10 clients (out of 10) - INFO : aggregate_evaluate: received 10 results and 0 failures - WARNING : No evaluate_metrics_aggregation_fn provided - INFO : - INFO : [ROUND 2] - INFO : configure_fit: strategy sampled 5 clients (out of 10) - INFO : aggregate_fit: received 5 results and 0 failures - INFO : configure_evaluate: strategy sampled 10 clients (out of 10) - INFO : aggregate_evaluate: received 10 results and 0 failures - INFO : - INFO : [ROUND 3] - INFO : configure_fit: strategy sampled 5 clients (out of 10) - INFO : aggregate_fit: received 5 results and 0 failures - INFO : configure_evaluate: strategy sampled 10 clients (out of 10) - INFO : aggregate_evaluate: received 10 results and 0 failures - INFO : - INFO : [SUMMARY] - INFO : Run finished 3 round(s) in 249.11s - INFO : History (loss, distributed): - INFO : round 1: 0.02111011856794357 - INFO : round 2: 0.019722302150726317 - INFO : round 3: 0.018227258533239362 - INFO : +.. code-block:: shell + + Loading project configuration... 
+ Success + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Using initial global parameters provided by strategy + INFO : Starting evaluation of initial global parameters + INFO : Evaluation returned no results (`None`) + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 2 clients (out of 10) + INFO : aggregate_fit: received 2 results and 0 failures + WARNING : No fit_metrics_aggregation_fn provided + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + WARNING : No evaluate_metrics_aggregation_fn provided + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 249.11s + INFO : History (loss, distributed): + INFO : round 1: 0.02111011856794357 + INFO : round 2: 0.019722302150726317 + INFO : round 3: 0.018227258533239362 + INFO : You can also run the project with GPU as follows: -.. code:: shell - - # Run with default arguments - $ flwr run . localhost-gpu - -This will use the default arguments where each ``ClientApp`` will use 2 -CPUs and at most 4 ``ClientApp``\s will run in a given GPU. - -You can also override the parameters defined in the -``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this: - -.. code:: shell - - # Override some arguments - $ flwr run . 
--run-config "num-server-rounds=5 fraction-fit=0.2" - -What follows is an explanation of each component in the project you just -created: dataset partition, the model, defining the ``ClientApp`` and -defining the ``ServerApp``. - -********** - The Data -********** - -This tutorial uses |flowerdatasets|_ to easily download and partition -the `IMDB `_ dataset. -In this example you'll make use of the |iidpartitioner|_ to generate -``num_partitions`` partitions. You can choose |otherpartitioners|_ -available in Flower Datasets. To tokenize the text, we will also load -the tokenizer from the pre-trained Transformer model that we'll use -during training - more on that in the next section. Each ``ClientApp`` -will call this function to create dataloaders with the data that -correspond to their data partition. - -.. code:: python - - partitioner = IidPartitioner(num_partitions=num_partitions) - fds = FederatedDataset( - dataset="stanfordnlp/imdb", - partitioners={"train": partitioner}, - ) - partition = fds.load_partition(partition_id) - # Divide data: 80% train, 20% test - partition_train_test = partition.train_test_split(test_size=0.2, seed=42) - - tokenizer = AutoTokenizer.from_pretrained(model_name) - - def tokenize_function(examples): - return tokenizer( - examples["text"], truncation=True, add_special_tokens=True, max_length=512 - ) - - partition_train_test = partition_train_test.map(tokenize_function, batched=True) - partition_train_test = partition_train_test.remove_columns("text") - partition_train_test = partition_train_test.rename_column("label", "labels") - - data_collator = DataCollatorWithPadding(tokenizer=tokenizer) - trainloader = DataLoader( - partition_train_test["train"], - shuffle=True, - batch_size=32, - collate_fn=data_collator, - ) - - testloader = DataLoader( - partition_train_test["test"], batch_size=32, collate_fn=data_collator - ) - -*********** - The Model -*********** - -We will leverage 🤗 Hugging Face to federate the training of language 
-models over multiple clients using Flower. More specifically, we will -fine-tune a pre-trained Transformer model (|berttiny|_) for sequence -classification over the dataset of IMDB ratings. The end goal is to -detect if a movie rating is positive or negative. If you have access to -larger GPUs, feel free to use larger models! - -.. code:: python - - net = AutoModelForSequenceClassification.from_pretrained( - model_name, num_labels=num_labels - ) - -Note that here, ``model_name`` is a string that will be loaded from the -``Context`` in the ClientApp and ServerApp. - -In addition to loading the pretrained model weights and architecture, we -also include two utility functions to perform both training (i.e. -``train()``) and evaluation (i.e. ``test()``) using the above model. -These functions should look fairly familiar if you have some prior -experience with PyTorch. Note these functions do not have anything -specific to Flower. That being said, the training function will normally -be called, as we'll see later, from a Flower client passing its own -data. In summary, your clients can use standard training/testing -functions to perform local training or evaluation: - -.. 
code:: python - - def train(net, trainloader, epochs, device): - optimizer = AdamW(net.parameters(), lr=5e-5) - net.train() - for _ in range(epochs): - for batch in trainloader: - batch = {k: v.to(device) for k, v in batch.items()} - outputs = net(**batch) - loss = outputs.loss - loss.backward() - optimizer.step() - optimizer.zero_grad() - - - def test(net, testloader, device): - metric = load_metric("accuracy") - loss = 0 - net.eval() - for batch in testloader: - batch = {k: v.to(device) for k, v in batch.items()} - with torch.no_grad(): - outputs = net(**batch) - logits = outputs.logits - loss += outputs.loss.item() - predictions = torch.argmax(logits, dim=-1) - metric.add_batch(predictions=predictions, references=batch["labels"]) - loss /= len(testloader.dataset) - accuracy = metric.compute()["accuracy"] - return loss, accuracy - -*************** - The ClientApp -*************** - -The main changes we have to make to use 🤗 Hugging Face with Flower will -be found in the ``get_weights()`` and ``set_weights()`` functions. Under -the hood, the ``transformers`` library uses PyTorch, which means we can -reuse the ``get_weights()`` and ``set_weights()`` code that we defined -in the :doc:`Quickstart PyTorch ` tutorial. -As a reminder, in ``get_weights()``, PyTorch model parameters are -extracted and represented as a list of NumPy arrays. The -``set_weights()`` function that's the opposite: given a list of NumPy -arrays it applies them to an existing PyTorch model. Doing this in -fairly easy in PyTorch. +.. code-block:: shell -.. note:: - - The specific implementation of ``get_weights()`` and - ``set_weights()`` depends on the type of models you use. The ones - shown below work for a wide range of PyTorch models but you might - need to adjust them if you have more exotic model architectures. - -.. 
code:: python - - def get_weights(net): - return [val.cpu().numpy() for _, val in net.state_dict().items()] - - def set_weights(net, parameters): - params_dict = zip(net.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - net.load_state_dict(state_dict, strict=True) - -The rest of the functionality is directly inspired by the centralized -case. The ``fit()`` method in the client trains the model using the -local dataset. Similarly, the ``evaluate()`` method is used to evaluate -the model received on a held-out validation set that the client might -have: - -.. code:: python - - class FlowerClient(NumPyClient): - def __init__(self, net, trainloader, testloader, local_epochs): - self.net = net - self.trainloader = trainloader - self.testloader = testloader - self.local_epochs = local_epochs - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - self.net.to(self.device) - - def fit(self, parameters, config): - set_weights(self.net, parameters) - train(self.net, self.trainloader, epochs=self.local_epochs, device=self.device) - return get_weights(self.net), len(self.trainloader), {} - - def evaluate(self, parameters, config): - set_weights(self.net, parameters) - loss, accuracy = test(self.net, self.testloader, self.device) - return float(loss), len(self.testloader), {"accuracy": accuracy} - -Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` -defined above by means of a ``client_fn()`` callback. Note that the -`context` enables you to get access to hyperparemeters defined in your -``pyproject.toml`` to configure the run. In this tutorial we access the -``local-epochs`` setting to control the number of epochs a ``ClientApp`` -will perform when running the ``fit()`` method. You could define -additional hyperparameters in ``pyproject.toml`` and access them here. + # Run with default arguments + $ flwr run . localhost-gpu -.. 
code:: python +This will use the default arguments where each ``ClientApp`` will use 2 CPUs and at most +4 ``ClientApp``\s will run in a given GPU. - def client_fn(context: Context): +You can also override the parameters defined in the ``[tool.flwr.app.config]`` section +in ``pyproject.toml`` like this: - # Get this client's dataset partition - partition_id = context.node_config["partition-id"] - num_partitions = context.node_config["num-partitions"] - model_name = context.run_config["model-name"] - trainloader, valloader = load_data(partition_id, num_partitions, model_name) +.. code-block:: shell - # Load model - num_labels = context.run_config["num-labels"] - net = AutoModelForSequenceClassification.from_pretrained( - model_name, num_labels=num_labels - ) + # Override some arguments + $ flwr run . --run-config "num-server-rounds=5 fraction-fit=0.2" + +What follows is an explanation of each component in the project you just created: +dataset partition, the model, defining the ``ClientApp`` and defining the ``ServerApp``. + +The Data +-------- + +This tutorial uses |flowerdatasets|_ to easily download and partition the `IMDB +`_ dataset. In this example you'll +make use of the |iidpartitioner|_ to generate ``num_partitions`` partitions. You can +choose |otherpartitioners|_ available in Flower Datasets. To tokenize the text, we will +also load the tokenizer from the pre-trained Transformer model that we'll use during +training - more on that in the next section. Each ``ClientApp`` will call this function +to create dataloaders with the data that correspond to their data partition. + +.. 
code-block:: python + + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="stanfordnlp/imdb", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id) + # Divide data: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + + tokenizer = AutoTokenizer.from_pretrained(model_name) + + + def tokenize_function(examples): + return tokenizer( + examples["text"], truncation=True, add_special_tokens=True, max_length=512 + ) + + + partition_train_test = partition_train_test.map(tokenize_function, batched=True) + partition_train_test = partition_train_test.remove_columns("text") + partition_train_test = partition_train_test.rename_column("label", "labels") + + data_collator = DataCollatorWithPadding(tokenizer=tokenizer) + trainloader = DataLoader( + partition_train_test["train"], + shuffle=True, + batch_size=32, + collate_fn=data_collator, + ) + + testloader = DataLoader( + partition_train_test["test"], batch_size=32, collate_fn=data_collator + ) + +The Model +--------- + +We will leverage 🤗 Hugging Face to federate the training of language models over +multiple clients using Flower. More specifically, we will fine-tune a pre-trained +Transformer model (|berttiny|_) for sequence classification over the dataset of IMDB +ratings. The end goal is to detect if a movie rating is positive or negative. If you +have access to larger GPUs, feel free to use larger models! + +.. code-block:: python + + net = AutoModelForSequenceClassification.from_pretrained( + model_name, num_labels=num_labels + ) + +Note that here, ``model_name`` is a string that will be loaded from the ``Context`` in +the ClientApp and ServerApp. + +In addition to loading the pretrained model weights and architecture, we also include +two utility functions to perform both training (i.e. ``train()``) and evaluation (i.e. +``test()``) using the above model. 
These functions should look fairly familiar if you +have some prior experience with PyTorch. Note these functions do not have anything +specific to Flower. That being said, the training function will normally be called, as +we'll see later, from a Flower client passing its own data. In summary, your clients can +use standard training/testing functions to perform local training or evaluation: + +.. code-block:: python + + def train(net, trainloader, epochs, device): + optimizer = AdamW(net.parameters(), lr=5e-5) + net.train() + for _ in range(epochs): + for batch in trainloader: + batch = {k: v.to(device) for k, v in batch.items()} + outputs = net(**batch) + loss = outputs.loss + loss.backward() + optimizer.step() + optimizer.zero_grad() + + + def test(net, testloader, device): + metric = load_metric("accuracy") + loss = 0 + net.eval() + for batch in testloader: + batch = {k: v.to(device) for k, v in batch.items()} + with torch.no_grad(): + outputs = net(**batch) + logits = outputs.logits + loss += outputs.loss.item() + predictions = torch.argmax(logits, dim=-1) + metric.add_batch(predictions=predictions, references=batch["labels"]) + loss /= len(testloader.dataset) + accuracy = metric.compute()["accuracy"] + return loss, accuracy + +The ClientApp +------------- + +The main changes we have to make to use 🤗 Hugging Face with Flower will be found in the +``get_weights()`` and ``set_weights()`` functions. Under the hood, the ``transformers`` +library uses PyTorch, which means we can reuse the ``get_weights()`` and +``set_weights()`` code that we defined in the :doc:`Quickstart PyTorch +` tutorial. As a reminder, in ``get_weights()``, PyTorch +model parameters are extracted and represented as a list of NumPy arrays. The +``set_weights()`` function does the opposite: given a list of NumPy arrays it applies +them to an existing PyTorch model. Doing this is fairly easy in PyTorch.
- local_epochs = context.run_config["local-epochs"] - - # Return Client instance - return FlowerClient(net, trainloader, valloader, local_epochs).to_client() - - # Flower ClientApp - app = ClientApp(client_fn) +.. note:: -*************** - The ServerApp -*************** + The specific implementation of ``get_weights()`` and ``set_weights()`` depends on + the type of models you use. The ones shown below work for a wide range of PyTorch + models but you might need to adjust them if you have more exotic model + architectures. + +.. code-block:: python + + def get_weights(net): + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + + def set_weights(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) + +The rest of the functionality is directly inspired by the centralized case. The +``fit()`` method in the client trains the model using the local dataset. Similarly, the +``evaluate()`` method is used to evaluate the model received on a held-out validation +set that the client might have: + +.. 
code-block:: python + + class FlowerClient(NumPyClient): + def __init__(self, net, trainloader, testloader, local_epochs): + self.net = net + self.trainloader = trainloader + self.testloader = testloader + self.local_epochs = local_epochs + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.net.to(self.device) + + def fit(self, parameters, config): + set_weights(self.net, parameters) + train(self.net, self.trainloader, epochs=self.local_epochs, device=self.device) + return get_weights(self.net), len(self.trainloader), {} + + def evaluate(self, parameters, config): + set_weights(self.net, parameters) + loss, accuracy = test(self.net, self.testloader, self.device) + return float(loss), len(self.testloader), {"accuracy": accuracy} + +Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` defined above by +means of a ``client_fn()`` callback. Note that the `context` enables you to get access +to hyperparemeters defined in your ``pyproject.toml`` to configure the run. In this +tutorial we access the ``local-epochs`` setting to control the number of epochs a +``ClientApp`` will perform when running the ``fit()`` method. You could define +additional hyperparameters in ``pyproject.toml`` and access them here. -To construct a ``ServerApp`` we define a ``server_fn()`` callback with -an identical signature to that of ``client_fn()`` but the return type is -|serverappcomponents|_ as opposed to a |client|_ In this example we use -the `FedAvg` strategy. To it we pass a randomly initialized model that -will server as the global model to federated. Note that the value of -``fraction_fit`` is read from the run config. You can find the default -value defined in the ``pyproject.toml``. +.. code-block:: python -.. 
code:: python + def client_fn(context: Context): - def server_fn(context: Context): - # Read from config - num_rounds = context.run_config["num-server-rounds"] - fraction_fit = context.run_config["fraction-fit"] + # Get this client's dataset partition + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + model_name = context.run_config["model-name"] + trainloader, valloader = load_data(partition_id, num_partitions, model_name) - # Initialize global model - model_name = context.run_config["model-name"] - num_labels = context.run_config["num-labels"] - net = AutoModelForSequenceClassification.from_pretrained( - model_name, num_labels=num_labels - ) + # Load model + num_labels = context.run_config["num-labels"] + net = AutoModelForSequenceClassification.from_pretrained( + model_name, num_labels=num_labels + ) - weights = get_weights(net) - initial_parameters = ndarrays_to_parameters(weights) + local_epochs = context.run_config["local-epochs"] - # Define strategy - strategy = FedAvg( - fraction_fit=fraction_fit, - fraction_evaluate=1.0, - initial_parameters=initial_parameters, - ) - config = ServerConfig(num_rounds=num_rounds) + # Return Client instance + return FlowerClient(net, trainloader, valloader, local_epochs).to_client() - return ServerAppComponents(strategy=strategy, config=config) + # Flower ClientApp + app = ClientApp(client_fn) - # Create ServerApp - app = ServerApp(server_fn=server_fn) +The ServerApp +------------- -Congratulations! You've successfully built and run your first federated -learning system for an LLM. +To construct a ``ServerApp`` we define a ``server_fn()`` callback with an identical +signature to that of ``client_fn()`` but the return type is |serverappcomponents|_ as +opposed to a |client|_ In this example we use the `FedAvg` strategy. To it we pass a +randomly initialized model that will server as the global model to federated. 
Note that +the value of ``fraction_fit`` is read from the run config. You can find the default +value defined in the ``pyproject.toml``. -.. note:: +.. code-block:: python - Check the source code of the extended version of this tutorial in - |quickstart_hf_link|_ in the Flower GitHub repository. For a - comprehensive example of a federated fine-tuning of an LLM with - Flower, refer to the |flowertune|_ example in the Flower GitHub - repository. + def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + fraction_fit = context.run_config["fraction-fit"] -.. |quickstart_hf_link| replace:: + # Initialize global model + model_name = context.run_config["model-name"] + num_labels = context.run_config["num-labels"] + net = AutoModelForSequenceClassification.from_pretrained( + model_name, num_labels=num_labels + ) - ``examples/quickstart-huggingface`` + weights = get_weights(net) + initial_parameters = ndarrays_to_parameters(weights) -.. |fedavg| replace:: + # Define strategy + strategy = FedAvg( + fraction_fit=fraction_fit, + fraction_evaluate=1.0, + initial_parameters=initial_parameters, + ) + config = ServerConfig(num_rounds=num_rounds) - ``FedAvg`` + return ServerAppComponents(strategy=strategy, config=config) -.. |iidpartitioner| replace:: - ``IidPartitioner`` + # Create ServerApp + app = ServerApp(server_fn=server_fn) -.. |otherpartitioners| replace:: +Congratulations! You've successfully built and run your first federated learning system +for an LLM. - other partitioners +.. note:: -.. |berttiny| replace:: + Check the source code of the extended version of this tutorial in + |quickstart_hf_link|_ in the Flower GitHub repository. For a comprehensive example + of a federated fine-tuning of an LLM with Flower, refer to the |flowertune|_ example + in the Flower GitHub repository. - ``bert-tiny`` +.. |quickstart_hf_link| replace:: ``examples/quickstart-huggingface`` -.. |serverappcomponents| replace:: +.. 
|fedavg| replace:: ``FedAvg`` - ``ServerAppComponents`` +.. |iidpartitioner| replace:: ``IidPartitioner`` -.. |client| replace:: +.. |otherpartitioners| replace:: other partitioners - ``Client`` +.. |berttiny| replace:: ``bert-tiny`` -.. |flowerdatasets| replace:: +.. |serverappcomponents| replace:: ``ServerAppComponents`` - Flower Datasets +.. |client| replace:: ``Client`` -.. |flowertune| replace:: +.. |flowerdatasets| replace:: Flower Datasets - FlowerTune LLM +.. |flowertune| replace:: FlowerTune LLM .. _berttiny: https://huggingface.co/prajjwal1/bert-tiny @@ -434,4 +400,4 @@ learning system for an LLM. .. _serverappcomponents: ref-api/flwr.server.ServerAppComponents.html#serverappcomponents .. meta:: - :description: Check out this Federating Learning quickstart tutorial for using Flower with 🤗 HuggingFace Transformers in order to fine-tune an LLM. + :description: Check out this Federating Learning quickstart tutorial for using Flower with 🤗 HuggingFace Transformers in order to fine-tune an LLM. diff --git a/doc/source/tutorial-quickstart-ios.rst b/doc/source/tutorial-quickstart-ios.rst index e4315ce569fb..8a9250f8dfb0 100644 --- a/doc/source/tutorial-quickstart-ios.rst +++ b/doc/source/tutorial-quickstart-ios.rst @@ -1,136 +1,155 @@ .. _quickstart-ios: - Quickstart iOS ============== .. meta:: - :description: Read this Federated Learning quickstart tutorial for creating an iOS app using Flower to train a neural network on MNIST. + :description: Read this Federated Learning quickstart tutorial for creating an iOS app using Flower to train a neural network on MNIST. -In this tutorial we will learn how to train a Neural Network on MNIST using Flower and CoreML on iOS devices. +In this tutorial we will learn how to train a Neural Network on MNIST using Flower and +CoreML on iOS devices. -First of all, for running the Flower Python server, it is recommended to create a virtual environment and run everything within a :doc:`virtualenv `. 
-For the Flower client implementation in iOS, it is recommended to use Xcode as our IDE. +First of all, for running the Flower Python server, it is recommended to create a +virtual environment and run everything within a :doc:`virtualenv +`. For the Flower client implementation in iOS, +it is recommended to use Xcode as our IDE. -Our example consists of one Python *server* and two iPhone *clients* that all have the same model. +Our example consists of one Python *server* and two iPhone *clients* that all have the +same model. -*Clients* are responsible for generating individual weight updates for the model based on their local datasets. -These updates are then sent to the *server* which will aggregate them to produce a better model. Finally, the *server* sends this improved version of the model back to each *client*. -A complete cycle of weight updates is called a *round*. +*Clients* are responsible for generating individual weight updates for the model based +on their local datasets. These updates are then sent to the *server* which will +aggregate them to produce a better model. Finally, the *server* sends this improved +version of the model back to each *client*. A complete cycle of weight updates is called +a *round*. -Now that we have a rough idea of what is going on, let's get started to setup our Flower server environment. We first need to install Flower. You can do this by using pip: +Now that we have a rough idea of what is going on, let's get started to setup our Flower +server environment. We first need to install Flower. You can do this by using pip: .. code-block:: shell - $ pip install flwr + $ pip install flwr Or Poetry: .. code-block:: shell - $ poetry add flwr + $ poetry add flwr Flower Client ------------- -Now that we have all our dependencies installed, let's run a simple distributed training using CoreML as our local training pipeline and MNIST as our dataset. 
-For simplicity reasons we will use the complete Flower client with CoreML, that has been implemented and stored inside the Swift SDK. The client implementation can be seen below: +Now that we have all our dependencies installed, let's run a simple distributed training +using CoreML as our local training pipeline and MNIST as our dataset. For simplicity +reasons we will use the complete Flower client with CoreML, that has been implemented +and stored inside the Swift SDK. The client implementation can be seen below: .. code-block:: swift - /// Parses the parameters from the local model and returns them as GetParametersRes struct - /// - /// - Returns: Parameters from the local model - public func getParameters() -> GetParametersRes { - let parameters = parameters.weightsToParameters() - let status = Status(code: .ok, message: String()) - - return GetParametersRes(parameters: parameters, status: status) - } - - /// Calls the routine to fit the local model - /// - /// - Returns: The result from the local training, e.g., updated parameters - public func fit(ins: FitIns) -> FitRes { - let status = Status(code: .ok, message: String()) - let result = runMLTask(configuration: parameters.parametersToWeights(parameters: ins.parameters), task: .train) - let parameters = parameters.weightsToParameters() - - return FitRes(parameters: parameters, numExamples: result.numSamples, status: status) - } + /// Parses the parameters from the local model and returns them as GetParametersRes struct + /// + /// - Returns: Parameters from the local model + public func getParameters() -> GetParametersRes { + let parameters = parameters.weightsToParameters() + let status = Status(code: .ok, message: String()) - /// Calls the routine to evaluate the local model - /// - /// - Returns: The result from the evaluation, e.g., loss - public func evaluate(ins: EvaluateIns) -> EvaluateRes { - let status = Status(code: .ok, message: String()) - let result = runMLTask(configuration: 
parameters.parametersToWeights(parameters: ins.parameters), task: .test) + return GetParametersRes(parameters: parameters, status: status) + } - return EvaluateRes(loss: Float(result.loss), numExamples: result.numSamples, status: status) - } + /// Calls the routine to fit the local model + /// + /// - Returns: The result from the local training, e.g., updated parameters + public func fit(ins: FitIns) -> FitRes { + let status = Status(code: .ok, message: String()) + let result = runMLTask(configuration: parameters.parametersToWeights(parameters: ins.parameters), task: .train) + let parameters = parameters.weightsToParameters() + + return FitRes(parameters: parameters, numExamples: result.numSamples, status: status) + } + + /// Calls the routine to evaluate the local model + /// + /// - Returns: The result from the evaluation, e.g., loss + public func evaluate(ins: EvaluateIns) -> EvaluateRes { + let status = Status(code: .ok, message: String()) + let result = runMLTask(configuration: parameters.parametersToWeights(parameters: ins.parameters), task: .test) + + return EvaluateRes(loss: Float(result.loss), numExamples: result.numSamples, status: status) + } -Let's create a new application project in Xcode and add :code:`flwr` as a dependency in your project. For our application, we will store the logic of our app in :code:`FLiOSModel.swift` and the UI elements in :code:`ContentView.swift`. -We will focus more on :code:`FLiOSModel.swift` in this quickstart. Please refer to the `full code example `_ to learn more about the app. +Let's create a new application project in Xcode and add ``flwr`` as a dependency in your +project. For our application, we will store the logic of our app in ``FLiOSModel.swift`` +and the UI elements in ``ContentView.swift``. We will focus more on ``FLiOSModel.swift`` +in this quickstart. Please refer to the `full code example +`_ to learn more about the app. 
-Import Flower and CoreML related packages in :code:`FLiOSModel.swift`: +Import Flower and CoreML related packages in ``FLiOSModel.swift``: .. code-block:: swift - import Foundation - import CoreML - import flwr + import Foundation + import CoreML + import flwr -Then add the mlmodel to the project simply by drag-and-drop, the mlmodel will be bundled inside the application during deployment to your iOS device. -We need to pass the url to access mlmodel and run CoreML machine learning processes, it can be retrieved by calling the function :code:`Bundle.main.url`. -For the MNIST dataset, we need to preprocess it into :code:`MLBatchProvider` object. The preprocessing is done inside :code:`DataLoader.swift`. +Then add the mlmodel to the project simply by drag-and-drop, the mlmodel will be bundled +inside the application during deployment to your iOS device. We need to pass the url to +access mlmodel and run CoreML machine learning processes, it can be retrieved by calling +the function ``Bundle.main.url``. For the MNIST dataset, we need to preprocess it into +``MLBatchProvider`` object. The preprocessing is done inside ``DataLoader.swift``. .. 
code-block:: swift - // prepare train dataset - let trainBatchProvider = DataLoader.trainBatchProvider() { _ in } + // prepare train dataset + let trainBatchProvider = DataLoader.trainBatchProvider() { _ in } - // prepare test dataset - let testBatchProvider = DataLoader.testBatchProvider() { _ in } + // prepare test dataset + let testBatchProvider = DataLoader.testBatchProvider() { _ in } - // load them together - let dataLoader = MLDataLoader(trainBatchProvider: trainBatchProvider, - testBatchProvider: testBatchProvider) + // load them together + let dataLoader = MLDataLoader(trainBatchProvider: trainBatchProvider, + testBatchProvider: testBatchProvider) -Since CoreML does not allow the model parameters to be seen before training, and accessing the model parameters during or after the training can only be done by specifying the layer name, -we need to know this information beforehand, through looking at the model specification, which are written as proto files. The implementation can be seen in :code:`MLModelInspect`. +Since CoreML does not allow the model parameters to be seen before training, and +accessing the model parameters during or after the training can only be done by +specifying the layer name, we need to know this information beforehand, through looking +at the model specification, which are written as proto files. The implementation can be +seen in ``MLModelInspect``. After we have all of the necessary information, let's create our Flower client. .. 
code-block:: swift - let compiledModelUrl = try MLModel.compileModel(at: url) + let compiledModelUrl = try MLModel.compileModel(at: url) - // inspect the model to be able to access the model parameters - // to access the model we need to know the layer name - // since the model parameters are stored as key value pairs - let modelInspect = try MLModelInspect(serializedData: Data(contentsOf: url)) - let layerWrappers = modelInspect.getLayerWrappers() - self.mlFlwrClient = MLFlwrClient(layerWrappers: layerWrappers, - dataLoader: dataLoader, - compiledModelUrl: compiledModelUrl) + // inspect the model to be able to access the model parameters + // to access the model we need to know the layer name + // since the model parameters are stored as key value pairs + let modelInspect = try MLModelInspect(serializedData: Data(contentsOf: url)) + let layerWrappers = modelInspect.getLayerWrappers() + self.mlFlwrClient = MLFlwrClient(layerWrappers: layerWrappers, + dataLoader: dataLoader, + compiledModelUrl: compiledModelUrl) -Then start the Flower gRPC client and start communicating to the server by passing our Flower client to the function :code:`startFlwrGRPC`. +Then start the Flower gRPC client and start communicating to the server by passing our +Flower client to the function ``startFlwrGRPC``. .. code-block:: swift - self.flwrGRPC = FlwrGRPC(serverHost: hostname, serverPort: port) - self.flwrGRPC.startFlwrGRPC(client: self.mlFlwrClient) + self.flwrGRPC = FlwrGRPC(serverHost: hostname, serverPort: port) + self.flwrGRPC.startFlwrGRPC(client: self.mlFlwrClient) -That's it for the client. We only have to implement :code:`Client` or call the provided -:code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. The attribute :code:`hostname` and :code:`port` tells the client which server to connect to. -This can be done by entering the hostname and port in the application before clicking the start button to start the federated learning process. +That's it for the client. 
We only have to implement ``Client`` or call the provided +``MLFlwrClient`` and call ``startFlwrGRPC()``. The attribute ``hostname`` and ``port`` +tells the client which server to connect to. This can be done by entering the hostname +and port in the application before clicking the start button to start the federated +learning process. Flower Server ------------- -For simple workloads we can start a Flower server and leave all the -configuration possibilities at their default values. In a file named -:code:`server.py`, import Flower and start the server: +For simple workloads we can start a Flower server and leave all the configuration +possibilities at their default values. In a file named ``server.py``, import Flower and +start the server: .. code-block:: python @@ -141,18 +160,21 @@ configuration possibilities at their default values. In a file named Train the model, federated! --------------------------- -With both client and server ready, we can now run everything and see federated -learning in action. FL systems usually have a server and multiple clients. We -therefore have to start the server first: +With both client and server ready, we can now run everything and see federated learning +in action. FL systems usually have a server and multiple clients. We therefore have to +start the server first: .. code-block:: shell $ python server.py -Once the server is running we can start the clients in different terminals. -Build and run the client through your Xcode, one through Xcode Simulator and the other by deploying it to your iPhone. -To see more about how to deploy your app to iPhone or Simulator visit `here `_. +Once the server is running we can start the clients in different terminals. Build and +run the client through your Xcode, one through Xcode Simulator and the other by +deploying it to your iPhone. To see more about how to deploy your app to iPhone or +Simulator visit `here +`_. -Congratulations! 
-You've successfully built and run your first federated learning system in your ios device. -The full `source code `_ for this example can be found in :code:`examples/ios`. +Congratulations! You've successfully built and run your first federated learning system +in your ios device. The full `source code +`_ for this example can be found +in ``examples/ios``. diff --git a/doc/source/tutorial-quickstart-jax.rst b/doc/source/tutorial-quickstart-jax.rst index d2b9243e2bb3..0581e95d8d42 100644 --- a/doc/source/tutorial-quickstart-jax.rst +++ b/doc/source/tutorial-quickstart-jax.rst @@ -1,34 +1,42 @@ .. _quickstart-jax: - Quickstart JAX ============== .. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with Jax to train a linear regression model on a scikit-learn dataset. - -This tutorial will show you how to use Flower to build a federated version of an existing JAX workload. -We are using JAX to train a linear regression model on a scikit-learn dataset. -We will structure the example similar to our `PyTorch - From Centralized To Federated `_ walkthrough. -First, we build a centralized training approach based on the `Linear Regression with JAX `_ tutorial`. -Then, we build upon the centralized training code to run the training in a federated fashion. - -Before we start building our JAX example, we need install the packages :code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`: + :description: Check out this Federated Learning quickstart tutorial for using Flower with Jax to train a linear regression model on a scikit-learn dataset. + +This tutorial will show you how to use Flower to build a federated version of an +existing JAX workload. We are using JAX to train a linear regression model on a +scikit-learn dataset. We will structure the example similar to our `PyTorch - From +Centralized To Federated +`_ +walkthrough. 
First, we build a centralized training approach based on the `Linear +Regression with JAX +`_ tutorial. +Then, we build upon the centralized training code to run the training in a federated +fashion. + +Before we start building our JAX example, we need to install the packages ``jax``, +``jaxlib``, ``scikit-learn``, and ``flwr``: .. code-block:: shell - $ pip install jax jaxlib scikit-learn flwr - + $ pip install jax jaxlib scikit-learn flwr Linear Regression with JAX -------------------------- -We begin with a brief description of the centralized training code based on a :code:`Linear Regression` model. -If you want a more in-depth explanation of what's going on then have a look at the official `JAX documentation `_. +We begin with a brief description of the centralized training code based on a ``Linear +Regression`` model. If you want a more in-depth explanation of what's going on then have +a look at the official `JAX documentation `_. -Let's create a new file called :code:`jax_training.py` with all the components required for a traditional (centralized) linear regression training. -First, the JAX packages :code:`jax` and :code:`jaxlib` need to be imported. In addition, we need to import :code:`sklearn` since we use :code:`make_regression` for the dataset and :code:`train_test_split` to split the dataset into a training and test set. -You can see that we do not yet import the :code:`flwr` package for federated learning. This will be done later. +Let's create a new file called ``jax_training.py`` with all the components required for +a traditional (centralized) linear regression training. First, the JAX packages ``jax`` +and ``jaxlib`` need to be imported. In addition, we need to import ``sklearn`` since we +use ``make_regression`` for the dataset and ``train_test_split`` to split the dataset +into a training and test set. You can see that we do not yet import the ``flwr`` package +for federated learning. This will be done later. ..
code-block:: python @@ -40,47 +48,52 @@ You can see that we do not yet import the :code:`flwr` package for federated lea key = jax.random.PRNGKey(0) -The :code:`load_data()` function loads the mentioned training and test sets. +The ``load_data()`` function loads the mentioned training and test sets. .. code-block:: python - def load_data() -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray], List[np.ndarray]]: + def load_data() -> ( + Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray], List[np.ndarray]] + ): # create our dataset and start with similar datasets for different clients X, y = make_regression(n_features=3, random_state=0) X, X_test, y, y_test = train_test_split(X, y) return X, y, X_test, y_test -The model architecture (a very simple :code:`Linear Regression` model) is defined in :code:`load_model()`. +The model architecture (a very simple ``Linear Regression`` model) is defined in +``load_model()``. .. code-block:: python def load_model(model_shape) -> Dict: # model weights - params = { - 'b' : jax.random.uniform(key), - 'w' : jax.random.uniform(key, model_shape) - } + params = {"b": jax.random.uniform(key), "w": jax.random.uniform(key, model_shape)} return params -We now need to define the training (function :code:`train()`), which loops over the training set and measures the loss (function :code:`loss_fn()`) for each batch of training examples. The loss function is separate since JAX takes derivatives with a :code:`grad()` function (defined in the :code:`main()` function and called in :code:`train()`). +We now need to define the training (function ``train()``), which loops over the training +set and measures the loss (function ``loss_fn()``) for each batch of training examples. +The loss function is separate since JAX takes derivatives with a ``grad()`` function +(defined in the ``main()`` function and called in ``train()``). .. 
code-block:: python def loss_fn(params, X, y) -> Callable: - err = jnp.dot(X, params['w']) + params['b'] - y + err = jnp.dot(X, params["w"]) + params["b"] - y return jnp.mean(jnp.square(err)) # mse + def train(params, grad_fn, X, y) -> Tuple[np.array, float, int]: num_examples = X.shape[0] for epochs in range(10): grads = grad_fn(params, X, y) params = jax.tree_multimap(lambda p, g: p - 0.05 * g, params, grads) - loss = loss_fn(params,X, y) + loss = loss_fn(params, X, y) # if epochs % 10 == 9: # print(f'For Epoch {epochs} loss {loss}') return params, loss, num_examples -The evaluation of the model is defined in the function :code:`evaluation()`. The function takes all test examples and measures the loss of the linear regression model. +The evaluation of the model is defined in the function ``evaluation()``. The function +takes all test examples and measures the loss of the linear regression model. .. code-block:: python @@ -91,7 +104,9 @@ The evaluation of the model is defined in the function :code:`evaluation()`. The # print(f'Test loss {loss_test}') return loss_test, num_examples -Having defined the data loading, model architecture, training, and evaluation we can put everything together and train our model using JAX. As already mentioned, the :code:`jax.grad()` function is defined in :code:`main()` and passed to :code:`train()`. +Having defined the data loading, model architecture, training, and evaluation we can put +everything together and train our model using JAX. As already mentioned, the +``jax.grad()`` function is defined in ``main()`` and passed to ``train()``. .. 
code-block:: python @@ -100,7 +115,7 @@ Having defined the data loading, model architecture, training, and evaluation we model_shape = X.shape[1:] grad_fn = jax.grad(loss_fn) print("Model Shape", model_shape) - params = load_model(model_shape) + params = load_model(model_shape) params, loss, num_examples = train(params, grad_fn, X, y) evaluation(params, grad_fn, X_test, y_test) @@ -110,40 +125,48 @@ Having defined the data loading, model architecture, training, and evaluation we You can now run your (centralized) JAX linear regression workload: -.. code-block:: python +.. code-block:: bash python3 jax_training.py -So far this should all look fairly familiar if you've used JAX before. -Let's take the next step and use what we've built to create a simple federated learning system consisting of one server and two clients. +So far this should all look fairly familiar if you've used JAX before. Let's take the +next step and use what we've built to create a simple federated learning system +consisting of one server and two clients. JAX meets Flower ---------------- -The concept of federating an existing workload is always the same and easy to understand. -We have to start a *server* and then use the code in :code:`jax_training.py` for the *clients* that are connected to the *server*. -The *server* sends model parameters to the clients. The *clients* run the training and update the parameters. -The updated parameters are sent back to the *server*, which averages all received parameter updates. -This describes one round of the federated learning process, and we repeat this for multiple rounds. +The concept of federating an existing workload is always the same and easy to +understand. We have to start a *server* and then use the code in ``jax_training.py`` for +the *clients* that are connected to the *server*. The *server* sends model parameters to +the clients. The *clients* run the training and update the parameters. 
The updated +parameters are sent back to the *server*, which averages all received parameter updates. +This describes one round of the federated learning process, and we repeat this for +multiple rounds. -Our example consists of one *server* and two *clients*. Let's set up :code:`server.py` first. The *server* needs to import the Flower package :code:`flwr`. -Next, we use the :code:`start_server` function to start a server and tell it to perform three rounds of federated learning. +Our example consists of one *server* and two *clients*. Let's set up ``server.py`` +first. The *server* needs to import the Flower package ``flwr``. Next, we use the +``start_server`` function to start a server and tell it to perform three rounds of +federated learning. .. code-block:: python import flwr as fl if __name__ == "__main__": - fl.server.start_server(server_address="0.0.0.0:8080", config=fl.server.ServerConfig(num_rounds=3)) + fl.server.start_server( + server_address="0.0.0.0:8080", config=fl.server.ServerConfig(num_rounds=3) + ) We can already start the *server*: -.. code-block:: python +.. code-block:: bash python3 server.py -Finally, we will define our *client* logic in :code:`client.py` and build upon the previously defined JAX training in :code:`jax_training.py`. -Our *client* needs to import :code:`flwr`, but also :code:`jax` and :code:`jaxlib` to update the parameters on our JAX model: +Finally, we will define our *client* logic in ``client.py`` and build upon the +previously defined JAX training in ``jax_training.py``. Our *client* needs to import +``flwr``, but also ``jax`` and ``jaxlib`` to update the parameters on our JAX model: .. code-block:: python @@ -156,36 +179,45 @@ Our *client* needs to import :code:`flwr`, but also :code:`jax` and :code:`jaxli import jax_training - -Implementing a Flower *client* basically means implementing a subclass of either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. 
-Our implementation will be based on :code:`flwr.client.NumPyClient` and we'll call it :code:`FlowerClient`. -:code:`NumPyClient` is slightly easier to implement than :code:`Client` if you use a framework with good NumPy interoperability (like JAX) because it avoids some of the boilerplate that would otherwise be necessary. -:code:`FlowerClient` needs to implement four methods, two methods for getting/setting model parameters, one method for training the model, and one method for testing the model: - -#. :code:`set_parameters (optional)` - * set the model parameters on the local model that are received from the server - * transform parameters to NumPy :code:`ndarray`'s - * loop over the list of model parameters received as NumPy :code:`ndarray`'s (think list of neural network layers) -#. :code:`get_parameters` - * get the model parameters and return them as a list of NumPy :code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects) -#. :code:`fit` - * update the parameters of the local model with the parameters received from the server - * train the model on the local training set - * get the updated local model parameters and return them to the server -#. :code:`evaluate` - * update the parameters of the local model with the parameters received from the server - * evaluate the updated model on the local test set - * return the local loss to the server - -The challenging part is to transform the JAX model parameters from :code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with `NumPyClient`. - -The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make use of the functions :code:`train()` and :code:`evaluate()` previously defined in :code:`jax_training.py`. -So what we really do here is we tell Flower through our :code:`NumPyClient` subclass which of our already defined functions to call for training and evaluation. 
-We included type annotations to give you a better understanding of the data types that get passed around. +Implementing a Flower *client* basically means implementing a subclass of either +``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our implementation will be based +on ``flwr.client.NumPyClient`` and we'll call it ``FlowerClient``. ``NumPyClient`` is +slightly easier to implement than ``Client`` if you use a framework with good NumPy +interoperability (like JAX) because it avoids some of the boilerplate that would +otherwise be necessary. ``FlowerClient`` needs to implement four methods, two methods +for getting/setting model parameters, one method for training the model, and one method +for testing the model: + +1. ``set_parameters (optional)`` + - set the model parameters on the local model that are received from the server + - transform parameters to NumPy ``ndarray``'s + - loop over the list of model parameters received as NumPy ``ndarray``'s (think + list of neural network layers) +2. ``get_parameters`` + - get the model parameters and return them as a list of NumPy ``ndarray``'s + (which is what ``flwr.client.NumPyClient`` expects) +3. ``fit`` + - update the parameters of the local model with the parameters received from the + server + - train the model on the local training set + - get the updated local model parameters and return them to the server +4. ``evaluate`` + - update the parameters of the local model with the parameters received from the + server + - evaluate the updated model on the local test set + - return the local loss to the server + +The challenging part is to transform the JAX model parameters from ``DeviceArray`` to +``NumPy ndarray`` to make them compatible with `NumPyClient`. + +The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the functions +``train()`` and ``evaluate()`` previously defined in ``jax_training.py``. 
So what we +really do here is we tell Flower through our ``NumPyClient`` subclass which of our +already defined functions to call for training and evaluation. We included type +annotations to give you a better understanding of the data types that get passed around. .. code-block:: python - class FlowerClient(fl.client.NumPyClient): """Flower client implementing using linear regression and JAX.""" @@ -198,7 +230,7 @@ We included type annotations to give you a better understanding of the data type test_x: List[np.ndarray], test_y: List[np.ndarray], ) -> None: - self.params= params + self.params = params self.grad_fn = grad_fn self.train_x = train_x self.train_y = train_y @@ -211,25 +243,26 @@ We included type annotations to give you a better understanding of the data type for _, val in self.params.items(): parameter_value.append(np.array(val)) return parameter_value - + def set_parameters(self, parameters: List[np.ndarray]) -> Dict: # Collect model parameters and update the parameters of the local model - value=jnp.ndarray - params_item = list(zip(self.params.keys(),parameters)) + value = jnp.ndarray + params_item = list(zip(self.params.keys(), parameters)) for item in params_item: key = item[0] value = item[1] self.params[key] = value return self.params - def fit( self, parameters: List[np.ndarray], config: Dict ) -> Tuple[List[np.ndarray], int, Dict]: # Set model parameters, train model, return updated model parameters print("Start local training") self.params = self.set_parameters(parameters) - self.params, loss, num_examples = jax_training.train(self.params, self.grad_fn, self.train_x, self.train_y) + self.params, loss, num_examples = jax_training.train( + self.params, self.grad_fn, self.train_x, self.train_y + ) results = {"loss": float(loss)} print("Training results", results) return self.get_parameters(config={}), num_examples, results @@ -240,7 +273,9 @@ We included type annotations to give you a better understanding of the data type # Set model parameters, 
evaluate the model on a local test dataset, return result print("Start evaluation") self.params = self.set_parameters(parameters) - loss, num_examples = jax_training.evaluation(self.params,self.grad_fn, self.test_x, self.test_y) + loss, num_examples = jax_training.evaluation( + self.params, self.grad_fn, self.test_x, self.test_y + ) print("Evaluation accuracy & loss", loss) return ( float(loss), @@ -267,22 +302,25 @@ Having defined the federation process, we can run it. client = FlowerClient(params, grad_fn, train_x, train_y, test_x, test_y) fl.client.start_client(server_address="0.0.0.0:8080", client=client.to_client()) + if __name__ == "__main__": main() - And that's it. You can now open two additional terminal windows and run -.. code-block:: python +.. code-block:: bash python3 client.py -in each window (make sure that the server is still running before you do so) and see your JAX project run federated learning across two clients. Congratulations! +in each window (make sure that the server is still running before you do so) and see +your JAX project run federated learning across two clients. Congratulations! Next Steps ---------- -The source code of this example was improved over time and can be found here: `Quickstart JAX `_. +The source code of this example was improved over time and can be found here: +`Quickstart JAX `_. Our example is somewhat over-simplified because both clients load the same dataset. -You're now prepared to explore this topic further. How about using a more sophisticated model or using a different dataset? How about adding more clients? +You're now prepared to explore this topic further. How about using a more sophisticated +model or using a different dataset? How about adding more clients? diff --git a/doc/source/tutorial-quickstart-mlx.rst b/doc/source/tutorial-quickstart-mlx.rst index 675a08502d26..40e870ddc822 100644 --- a/doc/source/tutorial-quickstart-mlx.rst +++ b/doc/source/tutorial-quickstart-mlx.rst @@ -1,410 +1,393 @@ .. 
_quickstart-mlx: -################ - Quickstart MLX -################ - -In this federated learning tutorial we will learn how to train simple -MLP on MNIST using Flower and MLX. It is recommended to create a virtual -environment and run everything within a :doc:`virtualenv -`. - -Let's use `flwr new` to create a complete Flower+MLX project. It will -generate all the files needed to run, by default with the Simulation -Engine, a federation of 10 nodes using `FedAvg +Quickstart MLX +============== + +In this federated learning tutorial we will learn how to train simple MLP on MNIST using +Flower and MLX. It is recommended to create a virtual environment and run everything +within a :doc:`virtualenv `. + +Let's use `flwr new` to create a complete Flower+MLX project. It will generate all the +files needed to run, by default with the Simulation Engine, a federation of 10 nodes +using `FedAvg `_. The dataset will be partitioned using Flower Dataset's `IidPartitioner `_. -Now that we have a rough idea of what this example is about, let's get -started. First, install Flower in your new environment: +Now that we have a rough idea of what this example is about, let's get started. First, +install Flower in your new environment: -.. code:: shell +.. code-block:: shell - # In a new Python environment - $ pip install flwr + # In a new Python environment + $ pip install flwr -Then, run the command below. You will be prompted to select of the -available templates (choose ``MLX``), give a name to your project, and -type in your developer name: +Then, run the command below. You will be prompted to select of the available templates +(choose ``MLX``), give a name to your project, and type in your developer name: -.. code:: shell +.. code-block:: shell - $ flwr new + $ flwr new -After running it you'll notice a new directory with your project name -has been created. 
It should have the following structure: +After running it you'll notice a new directory with your project name has been created. +It should have the following structure: -.. code:: shell +.. code-block:: shell - - ├── - │ ├── __init__.py - │ ├── client_app.py # Defines your ClientApp - │ ├── server_app.py # Defines your ServerApp - │ └── task.py # Defines your model, training and data loading - ├── pyproject.toml # Project metadata like dependencies and configs - └── README.md + + ├── + │ ├── __init__.py + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md -If you haven't yet installed the project and its dependencies, you can -do so by: +If you haven't yet installed the project and its dependencies, you can do so by: -.. code:: shell +.. code-block:: shell - # From the directory where your pyproject.toml is - $ pip install -e . + # From the directory where your pyproject.toml is + $ pip install -e . To run the project do: -.. code:: shell +.. code-block:: shell - # Run with default arguments - $ flwr run . + # Run with default arguments + $ flwr run . With default arguments you will see an output like this one: -.. code:: shell - - Loading project configuration... - Success - INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout - INFO : - INFO : [INIT] - INFO : Requesting initial parameters from one random client - WARNING : FAB ID is not provided; the default ClientApp will be loaded. 
- INFO : Received initial parameters from one random client - INFO : Evaluating initial global parameters - INFO : - INFO : [ROUND 1] - INFO : configure_fit: strategy sampled 10 clients (out of 10) - INFO : aggregate_fit: received 10 results and 0 failures - WARNING : No fit_metrics_aggregation_fn provided - INFO : configure_evaluate: strategy sampled 10 clients (out of 10) - INFO : aggregate_evaluate: received 10 results and 0 failures - WARNING : No evaluate_metrics_aggregation_fn provided - INFO : - INFO : [ROUND 2] - INFO : configure_fit: strategy sampled 10 clients (out of 10) - INFO : aggregate_fit: received 10 results and 0 failures - INFO : configure_evaluate: strategy sampled 10 clients (out of 10) - INFO : aggregate_evaluate: received 10 results and 0 failures - INFO : - INFO : [ROUND 3] - INFO : configure_fit: strategy sampled 10 clients (out of 10) - INFO : aggregate_fit: received 10 results and 0 failures - INFO : configure_evaluate: strategy sampled 10 clients (out of 10) - INFO : aggregate_evaluate: received 10 results and 0 failures - INFO : - INFO : [SUMMARY] - INFO : Run finished 3 round(s) in 8.15s - INFO : History (loss, distributed): - INFO : round 1: 2.243802046775818 - INFO : round 2: 2.101812958717346 - INFO : round 3: 1.7419301986694335 - INFO : - -You can also override the parameters defined in -``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this: - -.. code:: shell - - # Override some arguments - $ flwr run . --run-config "num-server-rounds=5 lr=0.05" - -What follows is an explanation of each component in the project you just -created: dataset partition, the model, defining the ``ClientApp`` and -defining the ``ServerApp``. - -********** - The Data -********** - -We will use `Flower Datasets `_ to -easily download and partition the `MNIST` dataset. In this example -you'll make use of the `IidPartitioner +.. code-block:: shell + + Loading project configuration... 
+ Success + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Requesting initial parameters from one random client + WARNING : FAB ID is not provided; the default ClientApp will be loaded. + INFO : Received initial parameters from one random client + INFO : Evaluating initial global parameters + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + WARNING : No fit_metrics_aggregation_fn provided + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + WARNING : No evaluate_metrics_aggregation_fn provided + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 8.15s + INFO : History (loss, distributed): + INFO : round 1: 2.243802046775818 + INFO : round 2: 2.101812958717346 + INFO : round 3: 1.7419301986694335 + INFO : + +You can also override the parameters defined in ``[tool.flwr.app.config]`` section in +the ``pyproject.toml`` like this: + +.. code-block:: shell + + # Override some arguments + $ flwr run . --run-config "num-server-rounds=5 lr=0.05" + +What follows is an explanation of each component in the project you just created: +dataset partition, the model, defining the ``ClientApp`` and defining the ``ServerApp``. 
+ +The Data +-------- + +We will use `Flower Datasets `_ to easily download and +partition the `MNIST` dataset. In this example you'll make use of the `IidPartitioner `_ -to generate `num_partitions` partitions. You can choose `other -partitioners -`_ -available in Flower Datasets: - -.. code:: python - - partitioner = IidPartitioner(num_partitions=num_partitions) - fds = FederatedDataset( - dataset="ylecun/mnist", - partitioners={"train": partitioner}, - ) - partition = fds.load_partition(partition_id) - partition_splits = partition.train_test_split(test_size=0.2, seed=42) - - partition_splits["train"].set_format("numpy") - partition_splits["test"].set_format("numpy") - - train_partition = partition_splits["train"].map( - lambda img: {"img": img.reshape(-1, 28 * 28).squeeze().astype(np.float32) / 255.0}, - input_columns="image", - ) - test_partition = partition_splits["test"].map( - lambda img: {"img": img.reshape(-1, 28 * 28).squeeze().astype(np.float32) / 255.0}, - input_columns="image", - ) - - data = ( - train_partition["img"], - train_partition["label"].astype(np.uint32), - test_partition["img"], - test_partition["label"].astype(np.uint32), - ) - - train_images, train_labels, test_images, test_labels = map(mx.array, data) - -*********** - The Model -*********** +to generate `num_partitions` partitions. You can choose `other partitioners +`_ available in +Flower Datasets: + +.. 
code-block:: python + + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="ylecun/mnist", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id) + partition_splits = partition.train_test_split(test_size=0.2, seed=42) + + partition_splits["train"].set_format("numpy") + partition_splits["test"].set_format("numpy") + + train_partition = partition_splits["train"].map( + lambda img: {"img": img.reshape(-1, 28 * 28).squeeze().astype(np.float32) / 255.0}, + input_columns="image", + ) + test_partition = partition_splits["test"].map( + lambda img: {"img": img.reshape(-1, 28 * 28).squeeze().astype(np.float32) / 255.0}, + input_columns="image", + ) + + data = ( + train_partition["img"], + train_partition["label"].astype(np.uint32), + test_partition["img"], + test_partition["label"].astype(np.uint32), + ) + + train_images, train_labels, test_images, test_labels = map(mx.array, data) + +The Model +--------- We define the model as in the `centralized MLX example -`_, it's a -simple MLP: +`_, it's a simple MLP: -.. code:: python +.. 
code-block:: python - class MLP(nn.Module): - """A simple MLP.""" + class MLP(nn.Module): + """A simple MLP.""" - def __init__( - self, num_layers: int, input_dim: int, hidden_dim: int, output_dim: int - ): - super().__init__() - layer_sizes = [input_dim] + [hidden_dim] * num_layers + [output_dim] - self.layers = [ - nn.Linear(idim, odim) - for idim, odim in zip(layer_sizes[:-1], layer_sizes[1:]) - ] + def __init__( + self, num_layers: int, input_dim: int, hidden_dim: int, output_dim: int + ): + super().__init__() + layer_sizes = [input_dim] + [hidden_dim] * num_layers + [output_dim] + self.layers = [ + nn.Linear(idim, odim) + for idim, odim in zip(layer_sizes[:-1], layer_sizes[1:]) + ] - def __call__(self, x): - for l in self.layers[:-1]: - x = mx.maximum(l(x), 0.0) - return self.layers[-1](x) + def __call__(self, x): + for l in self.layers[:-1]: + x = mx.maximum(l(x), 0.0) + return self.layers[-1](x) -We also define some utility functions to test our model and to iterate -over batches. +We also define some utility functions to test our model and to iterate over batches. -.. code:: python +.. 
code-block:: python - def loss_fn(model, X, y): - return mx.mean(nn.losses.cross_entropy(model(X), y)) + def loss_fn(model, X, y): + return mx.mean(nn.losses.cross_entropy(model(X), y)) - def eval_fn(model, X, y): - return mx.mean(mx.argmax(model(X), axis=1) == y) + def eval_fn(model, X, y): + return mx.mean(mx.argmax(model(X), axis=1) == y) - def batch_iterate(batch_size, X, y): - perm = mx.array(np.random.permutation(y.size)) - for s in range(0, y.size, batch_size): - ids = perm[s : s + batch_size] - yield X[ids], y[ids] + def batch_iterate(batch_size, X, y): + perm = mx.array(np.random.permutation(y.size)) + for s in range(0, y.size, batch_size): + ids = perm[s : s + batch_size] + yield X[ids], y[ids] The ClientApp -============= +~~~~~~~~~~~~~ -The main changes we have to make to use `MLX` with `Flower` will be -found in the ``get_params()`` and ``set_params()`` functions. Indeed, -MLX doesn't provide an easy way to convert the model parameters into a -list of ``np.array`` objects (the format we need for the serialization -of the messages to work). +The main changes we have to make to use `MLX` with `Flower` will be found in the +``get_params()`` and ``set_params()`` functions. Indeed, MLX doesn't provide an easy way +to convert the model parameters into a list of ``np.array`` objects (the format we need +for the serialization of the messages to work). The way MLX stores its parameters is as follows: -.. code:: shell - - { - "layers": [ - {"weight": mlx.core.array, "bias": mlx.core.array}, - {"weight": mlx.core.array, "bias": mlx.core.array}, - ..., - {"weight": mlx.core.array, "bias": mlx.core.array} - ] - } - -Therefore, to get our list of ``np.array`` objects, we need to extract -each array and convert them into a NumPy array: - -.. code:: python - - def get_params(model): - layers = model.parameters()["layers"] - return [np.array(val) for layer in layers for _, val in layer.items()] - -For the ``set_params()`` function, we perform the reverse operation. 
We -receive a list of NumPy arrays and want to convert them into MLX -parameters. Therefore, we iterate through pairs of parameters and assign -them to the `weight` and `bias` keys of each layer dict: - -.. code:: python - - def set_params(model, parameters): - new_params = {} - new_params["layers"] = [ - {"weight": mx.array(parameters[i]), "bias": mx.array(parameters[i + 1])} - for i in range(0, len(parameters), 2) - ] - model.update(new_params) - -The rest of the functionality is directly inspired by the centralized -case. The ``fit()`` method in the client trains the model using the -local dataset: - -.. code:: python - - def fit(self, parameters, config): - self.set_parameters(parameters) - for _ in range(self.num_epochs): - for X, y in batch_iterate( - self.batch_size, self.train_images, self.train_labels - ): - _, grads = self.loss_and_grad_fn(self.model, X, y) - self.optimizer.update(self.model, grads) - mx.eval(self.model.parameters(), self.optimizer.state) - return self.get_parameters(config={}), len(self.train_images), {} - -Here, after updating the parameters, we perform the training as in the -centralized case, and return the new parameters. +.. code-block:: shell + + { + "layers": [ + {"weight": mlx.core.array, "bias": mlx.core.array}, + {"weight": mlx.core.array, "bias": mlx.core.array}, + ..., + {"weight": mlx.core.array, "bias": mlx.core.array} + ] + } + +Therefore, to get our list of ``np.array`` objects, we need to extract each array and +convert them into a NumPy array: + +.. code-block:: python + + def get_params(model): + layers = model.parameters()["layers"] + return [np.array(val) for layer in layers for _, val in layer.items()] + +For the ``set_params()`` function, we perform the reverse operation. We receive a list +of NumPy arrays and want to convert them into MLX parameters. Therefore, we iterate +through pairs of parameters and assign them to the `weight` and `bias` keys of each +layer dict: + +.. 
code-block:: python + + def set_params(model, parameters): + new_params = {} + new_params["layers"] = [ + {"weight": mx.array(parameters[i]), "bias": mx.array(parameters[i + 1])} + for i in range(0, len(parameters), 2) + ] + model.update(new_params) + +The rest of the functionality is directly inspired by the centralized case. The +``fit()`` method in the client trains the model using the local dataset: + +.. code-block:: python + + def fit(self, parameters, config): + self.set_parameters(parameters) + for _ in range(self.num_epochs): + for X, y in batch_iterate( + self.batch_size, self.train_images, self.train_labels + ): + _, grads = self.loss_and_grad_fn(self.model, X, y) + self.optimizer.update(self.model, grads) + mx.eval(self.model.parameters(), self.optimizer.state) + return self.get_parameters(config={}), len(self.train_images), {} + +Here, after updating the parameters, we perform the training as in the centralized case, +and return the new parameters. And for the ``evaluate()`` method of the client: -.. code:: python +.. code-block:: python - def evaluate(self, parameters, config): - self.set_parameters(parameters) - accuracy = eval_fn(self.model, self.test_images, self.test_labels) - loss = loss_fn(self.model, self.test_images, self.test_labels) - return loss.item(), len(self.test_images), {"accuracy": accuracy.item()} + def evaluate(self, parameters, config): + self.set_parameters(parameters) + accuracy = eval_fn(self.model, self.test_images, self.test_labels) + loss = loss_fn(self.model, self.test_images, self.test_labels) + return loss.item(), len(self.test_images), {"accuracy": accuracy.item()} -We also begin by updating the parameters with the ones sent by the -server, and then we compute the loss and accuracy using the functions -defined above. In the constructor of the ``FlowerClient`` we instantiate -the `MLP` model as well as other components such as the optimizer. 
+We also begin by updating the parameters with the ones sent by the server, and then we +compute the loss and accuracy using the functions defined above. In the constructor of +the ``FlowerClient`` we instantiate the `MLP` model as well as other components such as +the optimizer. Putting everything together we have: -.. code:: python - - class FlowerClient(NumPyClient): - def __init__( - self, - data, - num_layers, - hidden_dim, - num_classes, - batch_size, - learning_rate, - num_epochs, - ): - self.num_layers = num_layers - self.hidden_dim = hidden_dim - self.num_classes = num_classes - self.batch_size = batch_size - self.learning_rate = learning_rate - self.num_epochs = num_epochs - - self.train_images, self.train_labels, self.test_images, self.test_labels = data - self.model = MLP( - num_layers, self.train_images.shape[-1], hidden_dim, num_classes - ) - self.optimizer = optim.SGD(learning_rate=learning_rate) - self.loss_and_grad_fn = nn.value_and_grad(self.model, loss_fn) - self.num_epochs = num_epochs - self.batch_size = batch_size - - def get_parameters(self, config): - return get_params(self.model) - - def set_parameters(self, parameters): - set_params(self.model, parameters) - - def fit(self, parameters, config): - self.set_parameters(parameters) - for _ in range(self.num_epochs): - for X, y in batch_iterate( - self.batch_size, self.train_images, self.train_labels - ): - _, grads = self.loss_and_grad_fn(self.model, X, y) - self.optimizer.update(self.model, grads) - mx.eval(self.model.parameters(), self.optimizer.state) - return self.get_parameters(config={}), len(self.train_images), {} - - def evaluate(self, parameters, config): - self.set_parameters(parameters) - accuracy = eval_fn(self.model, self.test_images, self.test_labels) - loss = loss_fn(self.model, self.test_images, self.test_labels) - return loss.item(), len(self.test_images), {"accuracy": accuracy.item()} - -Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` -defined above by 
means of a ``client_fn()`` callback. Note that -``context`` enables you to get access to hyperparemeters defined in -``pyproject.toml`` to configure the run. In this tutorial we access, -among other hyperparameters, the ``local-epochs`` setting to control the -number of epochs a ``ClientApp`` will perform when running the ``fit()`` -method. - -.. code:: python - - def client_fn(context: Context): - partition_id = context.node_config["partition-id"] - num_partitions = context.node_config["num-partitions"] - data = load_data(partition_id, num_partitions) - - num_layers = context.run_config["num-layers"] - hidden_dim = context.run_config["hidden-dim"] - num_classes = 10 - batch_size = context.run_config["batch-size"] - learning_rate = context.run_config["lr"] - num_epochs = context.run_config["local-epochs"] - - # Return Client instance - return FlowerClient( - data, num_layers, hidden_dim, num_classes, batch_size, learning_rate, num_epochs - ).to_client() - - - # Flower ClientApp - app = ClientApp(client_fn) +.. 
code-block:: python + + class FlowerClient(NumPyClient): + def __init__( + self, + data, + num_layers, + hidden_dim, + num_classes, + batch_size, + learning_rate, + num_epochs, + ): + self.num_layers = num_layers + self.hidden_dim = hidden_dim + self.num_classes = num_classes + self.batch_size = batch_size + self.learning_rate = learning_rate + self.num_epochs = num_epochs + + self.train_images, self.train_labels, self.test_images, self.test_labels = data + self.model = MLP( + num_layers, self.train_images.shape[-1], hidden_dim, num_classes + ) + self.optimizer = optim.SGD(learning_rate=learning_rate) + self.loss_and_grad_fn = nn.value_and_grad(self.model, loss_fn) + self.num_epochs = num_epochs + self.batch_size = batch_size + + def get_parameters(self, config): + return get_params(self.model) + + def set_parameters(self, parameters): + set_params(self.model, parameters) + + def fit(self, parameters, config): + self.set_parameters(parameters) + for _ in range(self.num_epochs): + for X, y in batch_iterate( + self.batch_size, self.train_images, self.train_labels + ): + _, grads = self.loss_and_grad_fn(self.model, X, y) + self.optimizer.update(self.model, grads) + mx.eval(self.model.parameters(), self.optimizer.state) + return self.get_parameters(config={}), len(self.train_images), {} + + def evaluate(self, parameters, config): + self.set_parameters(parameters) + accuracy = eval_fn(self.model, self.test_images, self.test_labels) + loss = loss_fn(self.model, self.test_images, self.test_labels) + return loss.item(), len(self.test_images), {"accuracy": accuracy.item()} + +Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` defined above by +means of a ``client_fn()`` callback. Note that ``context`` enables you to get access to +hyperparameters defined in ``pyproject.toml`` to configure the run. 
In this tutorial we +access, among other hyperparameters, the ``local-epochs`` setting to control the number +of epochs a ``ClientApp`` will perform when running the ``fit()`` method. + +.. code-block:: python + + def client_fn(context: Context): + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + data = load_data(partition_id, num_partitions) + + num_layers = context.run_config["num-layers"] + hidden_dim = context.run_config["hidden-dim"] + num_classes = 10 + batch_size = context.run_config["batch-size"] + learning_rate = context.run_config["lr"] + num_epochs = context.run_config["local-epochs"] + + # Return Client instance + return FlowerClient( + data, num_layers, hidden_dim, num_classes, batch_size, learning_rate, num_epochs + ).to_client() + + + # Flower ClientApp + app = ClientApp(client_fn) The ServerApp -------------- ++++++++++++++ -To construct a ``ServerApp``, we define a ``server_fn()`` callback with -an identical signature to that of ``client_fn()``, but the return type -is `ServerAppComponents +To construct a ``ServerApp``, we define a ``server_fn()`` callback with an identical +signature to that of ``client_fn()``, but the return type is `ServerAppComponents `_ as opposed to `Client -`_. -In this example we use the ``FedAvg`` strategy. +`_. In this +example we use the ``FedAvg`` strategy. -.. code:: python +.. 
code-block:: python - def server_fn(context: Context): - # Read from config - num_rounds = context.run_config["num-server-rounds"] + def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] - # Define strategy - strategy = FedAvg() - config = ServerConfig(num_rounds=num_rounds) + # Define strategy + strategy = FedAvg() + config = ServerConfig(num_rounds=num_rounds) - return ServerAppComponents(strategy=strategy, config=config) + return ServerAppComponents(strategy=strategy, config=config) - # Create ServerApp - app = ServerApp(server_fn=server_fn) + # Create ServerApp + app = ServerApp(server_fn=server_fn) -Congratulations! You've successfully built and run your first federated -learning system. +Congratulations! You've successfully built and run your first federated learning system. .. note:: - Check the `source code - `_ - of the extended version of this tutorial in - ``examples/quickstart-mlx`` in the Flower GitHub repository. + Check the `source code + `_ of the extended + version of this tutorial in ``examples/quickstart-mlx`` in the Flower GitHub + repository. diff --git a/doc/source/tutorial-quickstart-pandas.rst b/doc/source/tutorial-quickstart-pandas.rst index bb9cb1b28b54..00d831a15736 100644 --- a/doc/source/tutorial-quickstart-pandas.rst +++ b/doc/source/tutorial-quickstart-pandas.rst @@ -1,12 +1,12 @@ .. _quickstart-pandas: - Quickstart Pandas ================= .. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with Pandas to perform Federated Analytics. + :description: Check out this Federated Learning quickstart tutorial for using Flower with Pandas to perform Federated Analytics. Let's build a federated analytics system using Pandas and Flower! -Please refer to the `full code example `_ to learn more. +Please refer to the `full code example +`_ to learn more. 
diff --git a/doc/source/tutorial-quickstart-pytorch-lightning.rst b/doc/source/tutorial-quickstart-pytorch-lightning.rst index 7c74c9a1682f..089865a2969d 100644 --- a/doc/source/tutorial-quickstart-pytorch-lightning.rst +++ b/doc/source/tutorial-quickstart-pytorch-lightning.rst @@ -1,119 +1,118 @@ .. _quickstart-pytorch-lightning: -############################## - Quickstart PyTorch Lightning -############################## +Quickstart PyTorch Lightning +============================ -In this federated learning tutorial we will learn how to train an -AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is -recommended to create a virtual environment and run everything within a -:doc:`virtualenv `. +In this federated learning tutorial we will learn how to train an AutoEncoder model on +MNIST using Flower and PyTorch Lightning. It is recommended to create a virtual +environment and run everything within a :doc:`virtualenv +`. Then, clone the code example directly from GitHub: -.. code:: shell +.. code-block:: shell - git clone --depth=1 https://github.com/adap/flower.git _tmp \ - && mv _tmp/examples/quickstart-pytorch-lightning . \ - && rm -rf _tmp && cd quickstart-pytorch-lightning + git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/quickstart-pytorch-lightning . \ + && rm -rf _tmp && cd quickstart-pytorch-lightning -This will create a new directory called `quickstart-pytorch-lightning` -containing the following files: +This will create a new directory called `quickstart-pytorch-lightning` containing the +following files: -.. code:: shell +.. 
code-block:: shell - quickstart-pytorch-lightning - ├── pytorchlightning_example - │ ├── client_app.py # Defines your ClientApp - │ ├── server_app.py # Defines your ServerApp - │ └── task.py # Defines your model, training and data loading - ├── pyproject.toml # Project metadata like dependencies and configs - └── README.md + quickstart-pytorch-lightning + ├── pytorchlightning_example + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md Next, activate your environment, then run: -.. code:: shell +.. code-block:: shell - # Navigate to the example directory - $ cd path/to/quickstart-pytorch-lightning + # Navigate to the example directory + $ cd path/to/quickstart-pytorch-lightning - # Install project and dependencies - $ pip install -e . + # Install project and dependencies + $ pip install -e . -By default, Flower Simulation Engine will be started and it will create -a federation of 4 nodes using `FedAvg +By default, Flower Simulation Engine will be started and it will create a federation of +4 nodes using `FedAvg `_ -as the aggregation strategy. The dataset will be partitioned using -Flower Dataset's `IidPartitioner +as the aggregation strategy. The dataset will be partitioned using Flower Dataset's +`IidPartitioner `_. To run the project, do: -.. code:: shell +.. code-block:: shell - # Run with default arguments - $ flwr run . + # Run with default arguments + $ flwr run . With default arguments you will see an output like this one: -.. code:: shell - - Loading project configuration... 
- Success - INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout - INFO : - INFO : [INIT] - INFO : Using initial global parameters provided by strategy - INFO : Starting evaluation of initial global parameters - INFO : Evaluation returned no results (`None`) - INFO : - INFO : [ROUND 1] - INFO : configure_fit: strategy sampled 2 clients (out of 4) - INFO : aggregate_evaluate: received 2 results and 0 failures - WARNING : No evaluate_metrics_aggregation_fn provided - INFO : - INFO : [ROUND 2] - INFO : configure_fit: strategy sampled 2 clients (out of 4) - INFO : aggregate_fit: received 2 results and 0 failures - INFO : configure_evaluate: strategy sampled 2 clients (out of 4) - INFO : aggregate_evaluate: received 2 results and 0 failures - INFO : - INFO : [ROUND 3] - INFO : configure_fit: strategy sampled 2 clients (out of 4) - INFO : aggregate_fit: received 2 results and 0 failures - INFO : configure_evaluate: strategy sampled 2 clients (out of 4) - INFO : aggregate_evaluate: received 2 results and 0 failures - INFO : - INFO : [SUMMARY] - INFO : Run finished 3 round(s) in 136.92s - INFO : History (loss, distributed): - INFO : round 1: 0.04982871934771538 - INFO : round 2: 0.046457378193736076 - INFO : round 3: 0.04506748169660568 - INFO : - -Each simulated `ClientApp` (two per round) will also log a summary of -their local training process. Expect this output to be similar to: - -.. 
code:: shell - - # The left part indicates the process ID running the `ClientApp` - (ClientAppActor pid=38155) ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ - (ClientAppActor pid=38155) ┃ Test metric ┃ DataLoader 0 ┃ - (ClientAppActor pid=38155) ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ - (ClientAppActor pid=38155) │ test_loss │ 0.045175597071647644 │ - (ClientAppActor pid=38155) └───────────────────────────┴───────────────────────────┘ - -You can also override the parameters defined in the -``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this: - -.. code:: shell - - # Override some arguments - $ flwr run . --run-config num-server-rounds=5 +.. code-block:: shell + + Loading project configuration... + Success + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Using initial global parameters provided by strategy + INFO : Starting evaluation of initial global parameters + INFO : Evaluation returned no results (`None`) + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 2 clients (out of 4) + INFO : aggregate_evaluate: received 2 results and 0 failures + WARNING : No evaluate_metrics_aggregation_fn provided + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 2 clients (out of 4) + INFO : aggregate_fit: received 2 results and 0 failures + INFO : configure_evaluate: strategy sampled 2 clients (out of 4) + INFO : aggregate_evaluate: received 2 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 2 clients (out of 4) + INFO : aggregate_fit: received 2 results and 0 failures + INFO : configure_evaluate: strategy sampled 2 clients (out of 4) + INFO : aggregate_evaluate: received 2 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 136.92s + INFO : History (loss, distributed): + INFO : round 1: 0.04982871934771538 + INFO : round 2: 0.046457378193736076 + INFO : round 3: 
0.04506748169660568 + INFO : + +Each simulated `ClientApp` (two per round) will also log a summary of their local +training process. Expect this output to be similar to: + +.. code-block:: shell + + # The left part indicates the process ID running the `ClientApp` + (ClientAppActor pid=38155) ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ + (ClientAppActor pid=38155) ┃ Test metric ┃ DataLoader 0 ┃ + (ClientAppActor pid=38155) ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ + (ClientAppActor pid=38155) │ test_loss │ 0.045175597071647644 │ + (ClientAppActor pid=38155) └───────────────────────────┴───────────────────────────┘ + +You can also override the parameters defined in the ``[tool.flwr.app.config]`` section +in ``pyproject.toml`` like this: + +.. code-block:: shell + + # Override some arguments + $ flwr run . --run-config num-server-rounds=5 .. note:: - Check the `source code - `_ - of this tutorial in ``examples/quickstart-pytorch-lightning`` in the - Flower GitHub repository. + Check the `source code + `_ + of this tutorial in ``examples/quickstart-pytorch-lightning`` in the Flower GitHub + repository. diff --git a/doc/source/tutorial-quickstart-pytorch.rst b/doc/source/tutorial-quickstart-pytorch.rst index d00b9efbe16b..6b99e378d086 100644 --- a/doc/source/tutorial-quickstart-pytorch.rst +++ b/doc/source/tutorial-quickstart-pytorch.rst @@ -1,384 +1,366 @@ .. _quickstart-pytorch: -#################### - Quickstart PyTorch -#################### - -In this federated learning tutorial we will learn how to train a -Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is -recommended to create a virtual environment and run everything within a -:doc:`virtualenv `. - -Let's use `flwr new` to create a complete Flower+PyTorch project. 
It -will generate all the files needed to run, by default with the Flower -Simulation Engine, a federation of 10 nodes using `FedAvg +Quickstart PyTorch +================== + +In this federated learning tutorial we will learn how to train a Convolutional Neural +Network on CIFAR-10 using Flower and PyTorch. It is recommended to create a virtual +environment and run everything within a :doc:`virtualenv +`. + +Let's use `flwr new` to create a complete Flower+PyTorch project. It will generate all +the files needed to run, by default with the Flower Simulation Engine, a federation of +10 nodes using `FedAvg `_. The dataset will be partitioned using Flower Dataset's `IidPartitioner `_. -Now that we have a rough idea of what this example is about, let's get -started. First, install Flower in your new environment: +Now that we have a rough idea of what this example is about, let's get started. First, +install Flower in your new environment: -.. code:: shell +.. code-block:: shell - # In a new Python environment - $ pip install flwr + # In a new Python environment + $ pip install flwr -Then, run the command below. You will be prompted to select one of the -available templates (choose ``PyTorch``), give a name to your project, -and type in your developer name: +Then, run the command below. You will be prompted to select one of the available +templates (choose ``PyTorch``), give a name to your project, and type in your developer +name: -.. code:: shell +.. code-block:: shell - $ flwr new + $ flwr new -After running it you'll notice a new directory with your project name -has been created. It should have the following structure: +After running it you'll notice a new directory with your project name has been created. +It should have the following structure: -.. code:: shell +.. 
code-block:: shell - - ├── - │ ├── __init__.py - │ ├── client_app.py # Defines your ClientApp - │ ├── server_app.py # Defines your ServerApp - │ └── task.py # Defines your model, training and data loading - ├── pyproject.toml # Project metadata like dependencies and configs - └── README.md + + ├── + │ ├── __init__.py + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md -If you haven't yet installed the project and its dependencies, you can -do so by: +If you haven't yet installed the project and its dependencies, you can do so by: -.. code:: shell +.. code-block:: shell - # From the directory where your pyproject.toml is - $ pip install -e . + # From the directory where your pyproject.toml is + $ pip install -e . To run the project, do: -.. code:: shell +.. code-block:: shell - # Run with default arguments - $ flwr run . + # Run with default arguments + $ flwr run . With default arguments you will see an output like this one: -.. code:: shell - - Loading project configuration... - Success - WARNING : FAB ID is not provided; the default ClientApp will be loaded. 
- INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout - INFO : - INFO : [INIT] - INFO : Using initial global parameters provided by strategy - INFO : Evaluating initial global parameters - INFO : - INFO : [ROUND 1] - INFO : configure_fit: strategy sampled 5 clients (out of 10) - INFO : aggregate_fit: received 5 results and 0 failures - WARNING : No fit_metrics_aggregation_fn provided - INFO : configure_evaluate: strategy sampled 10 clients (out of 10) - INFO : aggregate_evaluate: received 10 results and 0 failures - WARNING : No evaluate_metrics_aggregation_fn provided - INFO : - INFO : [ROUND 2] - INFO : configure_fit: strategy sampled 5 clients (out of 10) - INFO : aggregate_fit: received 5 results and 0 failures - INFO : configure_evaluate: strategy sampled 10 clients (out of 10) - INFO : aggregate_evaluate: received 10 results and 0 failures - INFO : - INFO : [ROUND 3] - INFO : configure_fit: strategy sampled 5 clients (out of 10) - INFO : aggregate_fit: received 5 results and 0 failures - INFO : configure_evaluate: strategy sampled 10 clients (out of 10) - INFO : aggregate_evaluate: received 10 results and 0 failures - INFO : - INFO : [SUMMARY] - INFO : Run finished 3 round(s) in 21.35s - INFO : History (loss, distributed): - INFO : round 1: 2.2978184528648855 - INFO : round 2: 2.173852103948593 - INFO : round 3: 2.039920600131154 - INFO : - -You can also override the parameters defined in the -``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this: - -.. code:: shell - - # Override some arguments - $ flwr run . --run-config "num-server-rounds=5 local-epochs=3" - -What follows is an explanation of each component in the project you just -created: dataset partition, the model, defining the ``ClientApp`` and -defining the ``ServerApp``. - -********** - The Data -********** - -This tutorial uses `Flower Datasets `_ -to easily download and partition the `CIFAR-10` dataset. In this example -you'll make use of the `IidPartitioner +.. 
code-block:: shell + + Loading project configuration... + Success + WARNING : FAB ID is not provided; the default ClientApp will be loaded. + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Using initial global parameters provided by strategy + INFO : Evaluating initial global parameters + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + WARNING : No fit_metrics_aggregation_fn provided + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + WARNING : No evaluate_metrics_aggregation_fn provided + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 21.35s + INFO : History (loss, distributed): + INFO : round 1: 2.2978184528648855 + INFO : round 2: 2.173852103948593 + INFO : round 3: 2.039920600131154 + INFO : + +You can also override the parameters defined in the ``[tool.flwr.app.config]`` section +in ``pyproject.toml`` like this: + +.. code-block:: shell + + # Override some arguments + $ flwr run . --run-config "num-server-rounds=5 local-epochs=3" + +What follows is an explanation of each component in the project you just created: +dataset partition, the model, defining the ``ClientApp`` and defining the ``ServerApp``. 
+ +The Data +-------- + +This tutorial uses `Flower Datasets `_ to easily +download and partition the `CIFAR-10` dataset. In this example you'll make use of the +`IidPartitioner `_ -to generate `num_partitions` partitions. You can choose `other -partitioners -`_ -available in Flower Datasets. Each ``ClientApp`` will call this function -to create dataloaders with the data that correspond to their data -partition. - -.. code:: python - - partitioner = IidPartitioner(num_partitions=num_partitions) - fds = FederatedDataset( - dataset="uoft-cs/cifar10", - partitioners={"train": partitioner}, - ) - partition = fds.load_partition(partition_id) - # Divide data on each node: 80% train, 20% test - partition_train_test = partition.train_test_split(test_size=0.2, seed=42) - pytorch_transforms = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) - - - def apply_transforms(batch): - """Apply transforms to the partition from FederatedDataset.""" - batch["img"] = [pytorch_transforms(img) for img in batch["img"]] - return batch - - - partition_train_test = partition_train_test.with_transform(apply_transforms) - trainloader = DataLoader(partition_train_test["train"], batch_size=32, shuffle=True) - testloader = DataLoader(partition_train_test["test"], batch_size=32) - -*********** - The Model -*********** - -We defined a simple Convolutional Neural Network (CNN), but feel free to -replace it with a more sophisticated model if you'd like: - -.. 
code:: python - - class Net(nn.Module): - """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')""" - - def __init__(self): - super(Net, self).__init__() - self.conv1 = nn.Conv2d(3, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 5 * 5, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 10) - - def forward(self, x): - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 5 * 5) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - return self.fc3(x) - -In addition to defining the model architecture, we also include two -utility functions to perform both training (i.e. ``train()``) and -evaluation (i.e. ``test()``) using the above model. These functions -should look fairly familiar if you have some prior experience with -PyTorch. Note these functions do not have anything specific to Flower. -That being said, the training function will normally be called, as we'll -see later, from a Flower client passing its own data. In summary, your -clients can use standard training/testing functions to perform local -training or evaluation: - -.. 
code:: python - - def train(net, trainloader, epochs, device): - """Train the model on the training set.""" - net.to(device) # move model to GPU if available - criterion = torch.nn.CrossEntropyLoss().to(device) - optimizer = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9) - net.train() - running_loss = 0.0 - for _ in range(epochs): - for batch in trainloader: - images = batch["img"] - labels = batch["label"] - optimizer.zero_grad() - loss = criterion(net(images.to(device)), labels.to(device)) - loss.backward() - optimizer.step() - running_loss += loss.item() - - avg_trainloss = running_loss / len(trainloader) - return avg_trainloss - - - def test(net, testloader, device): - """Validate the model on the test set.""" - net.to(device) - criterion = torch.nn.CrossEntropyLoss() - correct, loss = 0, 0.0 - with torch.no_grad(): - for batch in testloader: - images = batch["img"].to(device) - labels = batch["label"].to(device) - outputs = net(images) - loss += criterion(outputs, labels).item() - correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() - accuracy = correct / len(testloader.dataset) - return loss, accuracy - -*************** - The ClientApp -*************** - -The main changes we have to make to use `PyTorch` with `Flower` will be -found in the ``get_weights()`` and ``set_weights()`` functions. In -``get_weights()`` PyTorch model parameters are extracted and represented -as a list of NumPy arrays. The ``set_weights()`` function that's the -oposite: given a list of NumPy arrays it applies them to an existing -PyTorch model. Doing this in fairly easy in PyTorch. +to generate `num_partitions` partitions. You can choose `other partitioners +`_ available in +Flower Datasets. Each ``ClientApp`` will call this function to create dataloaders with +the data that correspond to their data partition. + +.. 
code-block:: python + + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="uoft-cs/cifar10", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id) + # Divide data on each node: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + pytorch_transforms = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) + + + def apply_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["img"] = [pytorch_transforms(img) for img in batch["img"]] + return batch + + + partition_train_test = partition_train_test.with_transform(apply_transforms) + trainloader = DataLoader(partition_train_test["train"], batch_size=32, shuffle=True) + testloader = DataLoader(partition_train_test["test"], batch_size=32) + +The Model +--------- + +We defined a simple Convolutional Neural Network (CNN), but feel free to replace it with +a more sophisticated model if you'd like: + +.. code-block:: python + + class Net(nn.Module): + """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')""" + + def __init__(self): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x): + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 16 * 5 * 5) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + return self.fc3(x) + +In addition to defining the model architecture, we also include two utility functions to +perform both training (i.e. ``train()``) and evaluation (i.e. ``test()``) using the +above model. These functions should look fairly familiar if you have some prior +experience with PyTorch. Note these functions do not have anything specific to Flower. 
+That being said, the training function will normally be called, as we'll see later, from
+a Flower client passing its own data. In summary, your clients can use standard
+training/testing functions to perform local training or evaluation:
+
+.. code-block:: python
+
+    def train(net, trainloader, epochs, device):
+        """Train the model on the training set."""
+        net.to(device)  # move model to GPU if available
+        criterion = torch.nn.CrossEntropyLoss().to(device)
+        optimizer = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9)
+        net.train()
+        running_loss = 0.0
+        for _ in range(epochs):
+            for batch in trainloader:
+                images = batch["img"]
+                labels = batch["label"]
+                optimizer.zero_grad()
+                loss = criterion(net(images.to(device)), labels.to(device))
+                loss.backward()
+                optimizer.step()
+                running_loss += loss.item()
+
+        avg_trainloss = running_loss / len(trainloader)
+        return avg_trainloss
+
+
+    def test(net, testloader, device):
+        """Validate the model on the test set."""
+        net.to(device)
+        criterion = torch.nn.CrossEntropyLoss()
+        correct, loss = 0, 0.0
+        with torch.no_grad():
+            for batch in testloader:
+                images = batch["img"].to(device)
+                labels = batch["label"].to(device)
+                outputs = net(images)
+                loss += criterion(outputs, labels).item()
+                correct += (torch.max(outputs.data, 1)[1] == labels).sum().item()
+        accuracy = correct / len(testloader.dataset)
+        return loss, accuracy
+
+The ClientApp
+-------------
+
+The main changes we have to make to use `PyTorch` with `Flower` will be found in the
+``get_weights()`` and ``set_weights()`` functions. In ``get_weights()`` PyTorch model
+parameters are extracted and represented as a list of NumPy arrays. The
+``set_weights()`` function does the opposite: given a list of NumPy arrays it applies
+them to an existing PyTorch model. Doing this is fairly easy in PyTorch.
 
 .. note:: 
 
-   The specific implementation of ``get_weights()`` and
-   ``set_weights()`` depends on the type of models you use. 
The ones - shown below work for a wide range of PyTorch models but you might - need to adjust them if you have more exotic model architectures. - -.. code:: python - - def get_weights(net): - return [val.cpu().numpy() for _, val in net.state_dict().items()] - - - def set_weights(net, parameters): - params_dict = zip(net.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - net.load_state_dict(state_dict, strict=True) - -The rest of the functionality is directly inspired by the centralized -case. The ``fit()`` method in the client trains the model using the -local dataset. Similarly, the ``evaluate()`` method is used to evaluate -the model received on a held-out validation set that the client might -have: - -.. code:: python - - class FlowerClient(NumPyClient): - def __init__(self, net, trainloader, valloader, local_epochs): - self.net = net - self.trainloader = trainloader - self.valloader = valloader - self.local_epochs = local_epochs - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - self.net.to(device) - - def fit(self, parameters, config): - set_weights(self.net, parameters) - results = train( - self.net, - self.trainloader, - self.valloader, - self.local_epochs, - self.device, - ) - return get_weights(self.net), len(self.trainloader.dataset), results - - def evaluate(self, parameters, config): - set_weights(self.net, parameters) - loss, accuracy = test(self.net, self.valloader, self.device) - return loss, len(self.valloader.dataset), {"accuracy": accuracy} - -Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` -defined above by means of a ``client_fn()`` callback. Note that the -`context` enables you to get access to hyperparemeters defined in your -``pyproject.toml`` to configure the run. In this tutorial we access the -`local-epochs` setting to control the number of epochs a ``ClientApp`` -will perform when running the ``fit()`` method. 
You could define
+    The specific implementation of ``get_weights()`` and ``set_weights()`` depends on
+    the type of models you use. The ones shown below work for a wide range of PyTorch
+    models but you might need to adjust them if you have more exotic model
+    architectures.
+
+.. code-block:: python
+
+    def get_weights(net):
+        return [val.cpu().numpy() for _, val in net.state_dict().items()]
+
+
+    def set_weights(net, parameters):
+        params_dict = zip(net.state_dict().keys(), parameters)
+        state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
+        net.load_state_dict(state_dict, strict=True)
+
+The rest of the functionality is directly inspired by the centralized case. The
+``fit()`` method in the client trains the model using the local dataset. Similarly, the
+``evaluate()`` method is used to evaluate the model received on a held-out validation
+set that the client might have:
+
+.. code-block:: python
+
+    class FlowerClient(NumPyClient):
+        def __init__(self, net, trainloader, valloader, local_epochs):
+            self.net = net
+            self.trainloader = trainloader
+            self.valloader = valloader
+            self.local_epochs = local_epochs
+            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+            self.net.to(self.device)
+
+        def fit(self, parameters, config):
+            set_weights(self.net, parameters)
+            # train() (defined above) takes (net, trainloader, epochs, device)
+            train_loss = train(
+                self.net,
+                self.trainloader,
+                self.local_epochs,
+                self.device,
+            )
+            return get_weights(self.net), len(self.trainloader.dataset), {"train_loss": train_loss}
+
+        def evaluate(self, parameters, config):
+            set_weights(self.net, parameters)
+            loss, accuracy = test(self.net, self.valloader, self.device)
+            return loss, len(self.valloader.dataset), {"accuracy": accuracy}
+
+Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` defined above by
+means of a ``client_fn()`` callback. Note that the `context` enables you to get access
+to hyperparameters defined in your ``pyproject.toml`` to configure the run. 
In this +tutorial we access the `local-epochs` setting to control the number of epochs a +``ClientApp`` will perform when running the ``fit()`` method. You could define additioinal hyperparameters in ``pyproject.toml`` and access them here. -.. code:: python +.. code-block:: python - def client_fn(context: Context): - # Load model and data - net = Net() - partition_id = context.node_config["partition-id"] - num_partitions = context.node_config["num-partitions"] - trainloader, valloader = load_data(partition_id, num_partitions) - local_epochs = context.run_config["local-epochs"] + def client_fn(context: Context): + # Load model and data + net = Net() + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + trainloader, valloader = load_data(partition_id, num_partitions) + local_epochs = context.run_config["local-epochs"] - # Return Client instance - return FlowerClient(net, trainloader, valloader, local_epochs).to_client() + # Return Client instance + return FlowerClient(net, trainloader, valloader, local_epochs).to_client() - # Flower ClientApp - app = ClientApp(client_fn) + # Flower ClientApp + app = ClientApp(client_fn) -*************** - The ServerApp -*************** +The ServerApp +------------- -To construct a ``ServerApp`` we define a ``server_fn()`` callback with -an identical signature to that of ``client_fn()`` but the return type is -`ServerAppComponents +To construct a ``ServerApp`` we define a ``server_fn()`` callback with an identical +signature to that of ``client_fn()`` but the return type is `ServerAppComponents `_ as opposed to a `Client -`_. -In this example we use the `FedAvg`. To it we pass a randomly -initialized model that will server as the global model to federated. -Note that the value of ``fraction_fit`` is read from the run config. You -can find the default value defined in the ``pyproject.toml``. +`_. In this +example we use the `FedAvg`. 
To it we pass a randomly initialized model that will server +as the global model to federated. Note that the value of ``fraction_fit`` is read from +the run config. You can find the default value defined in the ``pyproject.toml``. -.. code:: python +.. code-block:: python - def server_fn(context: Context): - # Read from config - num_rounds = context.run_config["num-server-rounds"] - fraction_fit = context.run_config["fraction-fit"] + def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + fraction_fit = context.run_config["fraction-fit"] - # Initialize model parameters - ndarrays = get_weights(Net()) - parameters = ndarrays_to_parameters(ndarrays) + # Initialize model parameters + ndarrays = get_weights(Net()) + parameters = ndarrays_to_parameters(ndarrays) - # Define strategy - strategy = FedAvg( - fraction_fit=fraction_fit, - fraction_evaluate=1.0, - min_available_clients=2, - initial_parameters=parameters, - ) - config = ServerConfig(num_rounds=num_rounds) + # Define strategy + strategy = FedAvg( + fraction_fit=fraction_fit, + fraction_evaluate=1.0, + min_available_clients=2, + initial_parameters=parameters, + ) + config = ServerConfig(num_rounds=num_rounds) - return ServerAppComponents(strategy=strategy, config=config) + return ServerAppComponents(strategy=strategy, config=config) - # Create ServerApp - app = ServerApp(server_fn=server_fn) + # Create ServerApp + app = ServerApp(server_fn=server_fn) -Congratulations! You've successfully built and run your first federated -learning system. +Congratulations! You've successfully built and run your first federated learning system. .. note:: - Check the `source code - `_ - of the extended version of this tutorial in - ``examples/quickstart-pytorch`` in the Flower GitHub repository. + Check the `source code + `_ of the + extended version of this tutorial in ``examples/quickstart-pytorch`` in the Flower + GitHub repository. 
-**************** - Video tutorial -**************** +Video tutorial +-------------- .. note:: - The video shown below shows how to setup a PyTorch + Flower project - using our previously recommended APIs. A new video tutorial will be - released that shows the new APIs (as the content above does) + The video shown below shows how to setup a PyTorch + Flower project using our + previously recommended APIs. A new video tutorial will be released that shows the + new APIs (as the content above does) .. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with PyTorch to train a CNN model on MNIST. + :description: Check out this Federated Learning quickstart tutorial for using Flower with PyTorch to train a CNN model on MNIST. .. youtube:: jOmmuzMIQ4c - :width: 100% + :width: 100% diff --git a/doc/source/tutorial-quickstart-scikitlearn.rst b/doc/source/tutorial-quickstart-scikitlearn.rst index fc3b58925c06..56bdf18cad17 100644 --- a/doc/source/tutorial-quickstart-scikitlearn.rst +++ b/doc/source/tutorial-quickstart-scikitlearn.rst @@ -1,77 +1,89 @@ .. _quickstart-scikitlearn: - Quickstart scikit-learn ======================= .. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with scikit-learn to train a linear regression model. + :description: Check out this Federated Learning quickstart tutorial for using Flower with scikit-learn to train a linear regression model. -In this tutorial, we will learn how to train a :code:`Logistic Regression` model on MNIST using Flower and scikit-learn. +In this tutorial, we will learn how to train a ``Logistic Regression`` model on MNIST +using Flower and scikit-learn. -It is recommended to create a virtual environment and run everything within this :doc:`virtualenv `. +It is recommended to create a virtual environment and run everything within this +:doc:`virtualenv `. Our example consists of one *server* and two *clients* all having the same model. 
-*Clients* are responsible for generating individual model parameter updates for the model based on their local datasets. -These updates are then sent to the *server* which will aggregate them to produce an updated global model. Finally, the *server* sends this improved version of the model back to each *client*. -A complete cycle of parameters updates is called a *round*. +*Clients* are responsible for generating individual model parameter updates for the +model based on their local datasets. These updates are then sent to the *server* which +will aggregate them to produce an updated global model. Finally, the *server* sends this +improved version of the model back to each *client*. A complete cycle of parameters +updates is called a *round*. -Now that we have a rough idea of what is going on, let's get started. We first need to install Flower. You can do this by running: +Now that we have a rough idea of what is going on, let's get started. We first need to +install Flower. You can do this by running: .. code-block:: shell - $ pip install flwr + $ pip install flwr Since we want to use scikit-learn, let's go ahead and install it: .. code-block:: shell - $ pip install scikit-learn + $ pip install scikit-learn Or simply install all dependencies using Poetry: .. code-block:: shell - $ poetry install - + $ poetry install Flower Client ------------- -Now that we have all our dependencies installed, let's run a simple distributed training with two clients and one server. -However, before setting up the client and server, we will define all functionalities that we need for our federated learning setup within :code:`utils.py`. The :code:`utils.py` contains different functions defining all the machine learning basics: +Now that we have all our dependencies installed, let's run a simple distributed training +with two clients and one server. 
However, before setting up the client and server, we +will define all functionalities that we need for our federated learning setup within +``utils.py``. The ``utils.py`` contains different functions defining all the machine +learning basics: -* :code:`get_model_parameters()` - * Returns the parameters of a :code:`sklearn` LogisticRegression model -* :code:`set_model_params()` - * Sets the parameters of a :code:`sklearn` LogisticRegression model -* :code:`set_initial_params()` - * Initializes the model parameters that the Flower server will ask for +- ``get_model_parameters()`` + - Returns the parameters of a ``sklearn`` LogisticRegression model +- ``set_model_params()`` + - Sets the parameters of a ``sklearn`` LogisticRegression model +- ``set_initial_params()`` + - Initializes the model parameters that the Flower server will ask for -Please check out :code:`utils.py` `here `_ for more details. -The pre-defined functions are used in the :code:`client.py` and imported. The :code:`client.py` also requires to import several packages such as Flower and scikit-learn: +Please check out ``utils.py`` `here +`_ for +more details. The pre-defined functions are used in the ``client.py`` and imported. The +``client.py`` also requires to import several packages such as Flower and scikit-learn: .. code-block:: python - import argparse - import warnings - - from sklearn.linear_model import LogisticRegression - from sklearn.metrics import log_loss - - import flwr as fl - import utils - from flwr_datasets import FederatedDataset + import argparse + import warnings + + from sklearn.linear_model import LogisticRegression + from sklearn.metrics import log_loss + + import flwr as fl + import utils + from flwr_datasets import FederatedDataset -Prior to local training, we need to load the MNIST dataset, a popular image classification dataset of handwritten digits for machine learning, and partition the dataset for FL. This can be conveniently achieved using `Flower Datasets `_. 
-The :code:`FederatedDataset.load_partition()` method loads the partitioned training set for each partition ID defined in the :code:`--partition-id` argument. +Prior to local training, we need to load the MNIST dataset, a popular image +classification dataset of handwritten digits for machine learning, and partition the +dataset for FL. This can be conveniently achieved using `Flower Datasets +`_. The ``FederatedDataset.load_partition()`` method +loads the partitioned training set for each partition ID defined in the +``--partition-id`` argument. .. code-block:: python if __name__ == "__main__": N_CLIENTS = 10 - + parser = argparse.ArgumentParser(description="Flower") parser.add_argument( "--partition-id", @@ -82,17 +94,17 @@ The :code:`FederatedDataset.load_partition()` method loads the partitioned train ) args = parser.parse_args() partition_id = args.partition_id - + fds = FederatedDataset(dataset="mnist", partitioners={"train": N_CLIENTS}) - + dataset = fds.load_partition(partition_id, "train").with_format("numpy") X, y = dataset["image"].reshape((len(dataset), -1)), dataset["label"] - + X_train, X_test = X[: int(0.8 * len(X))], X[int(0.8 * len(X)) :] y_train, y_test = y[: int(0.8 * len(y))], y[int(0.8 * len(y)) :] - -Next, the logistic regression model is defined and initialized with :code:`utils.set_initial_params()`. +Next, the logistic regression model is defined and initialized with +``utils.set_initial_params()``. .. code-block:: python @@ -104,28 +116,27 @@ Next, the logistic regression model is defined and initialized with :code:`utils utils.set_initial_params(model) -The Flower server interacts with clients through an interface called -:code:`Client`. When the server selects a particular client for training, it -sends training instructions over the network. The client receives those -instructions and calls one of the :code:`Client` methods to run your code -(i.e., to fit the logistic regression we defined earlier). 
- -Flower provides a convenience class called :code:`NumPyClient` which makes it -easier to implement the :code:`Client` interface when your workload uses scikit-learn. -Implementing :code:`NumPyClient` usually means defining the following methods -(:code:`set_parameters` is optional though): - -#. :code:`get_parameters` - * return the model weight as a list of NumPy ndarrays -#. :code:`set_parameters` (optional) - * update the local model weights with the parameters received from the server - * is directly imported with :code:`utils.set_model_params()` -#. :code:`fit` - * set the local model weights - * train the local model - * return the updated local model weights -#. :code:`evaluate` - * test the local model +The Flower server interacts with clients through an interface called ``Client``. When +the server selects a particular client for training, it sends training instructions over +the network. The client receives those instructions and calls one of the ``Client`` +methods to run your code (i.e., to fit the logistic regression we defined earlier). + +Flower provides a convenience class called ``NumPyClient`` which makes it easier to +implement the ``Client`` interface when your workload uses scikit-learn. Implementing +``NumPyClient`` usually means defining the following methods (``set_parameters`` is +optional though): + +1. ``get_parameters`` + - return the model weight as a list of NumPy ndarrays +2. ``set_parameters`` (optional) + - update the local model weights with the parameters received from the server + - is directly imported with ``utils.set_model_params()`` +3. ``fit`` + - set the local model weights + - train the local model + - return the updated local model weights +4. 
``evaluate`` + - test the local model The methods can be implemented in the following way: @@ -149,27 +160,29 @@ The methods can be implemented in the following way: accuracy = model.score(X_test, y_test) return loss, len(X_test), {"accuracy": accuracy} - -We can now create an instance of our class :code:`MnistClient` and add one line -to actually run this client: +We can now create an instance of our class ``MnistClient`` and add one line to actually +run this client: .. code-block:: python fl.client.start_client("0.0.0.0:8080", client=MnistClient().to_client()) -That's it for the client. We only have to implement :code:`Client` or -:code:`NumPyClient` and call :code:`fl.client.start_client()`. If you implement a client of type :code:`NumPyClient` you'll need to first call its :code:`to_client()` method. The string :code:`"0.0.0.0:8080"` tells the client which server to connect to. In our case we can run the server and the client on the same machine, therefore we use -:code:`"0.0.0.0:8080"`. If we run a truly federated workload with the server and -clients running on different machines, all that needs to change is the -:code:`server_address` we pass to the client. +That's it for the client. We only have to implement ``Client`` or ``NumPyClient`` and +call ``fl.client.start_client()``. If you implement a client of type ``NumPyClient`` +you'll need to first call its ``to_client()`` method. The string ``"0.0.0.0:8080"`` +tells the client which server to connect to. In our case we can run the server and the +client on the same machine, therefore we use ``"0.0.0.0:8080"``. If we run a truly +federated workload with the server and clients running on different machines, all that +needs to change is the ``server_address`` we pass to the client. Flower Server ------------- -The following Flower server is a little bit more advanced and returns an evaluation function for the server-side evaluation. 
-First, we import again all required libraries such as Flower and scikit-learn. +The following Flower server is a little bit more advanced and returns an evaluation +function for the server-side evaluation. First, we import again all required libraries +such as Flower and scikit-learn. -:code:`server.py`, import Flower and start the server: +``server.py``, import Flower and start the server: .. code-block:: python @@ -179,12 +192,14 @@ First, we import again all required libraries such as Flower and scikit-learn. from sklearn.metrics import log_loss from sklearn.linear_model import LogisticRegression from typing import Dict - + from flwr_datasets import FederatedDataset -The number of federated learning rounds is set in :code:`fit_round()` and the evaluation is defined in :code:`get_evaluate_fn()`. -The evaluation function is called after each federated learning round and gives you information about loss and accuracy. -Note that we also make use of Flower Datasets here to load the test split of the MNIST dataset for server-side evaluation. +The number of federated learning rounds is set in ``fit_round()`` and the evaluation is +defined in ``get_evaluate_fn()``. The evaluation function is called after each federated +learning round and gives you information about loss and accuracy. Note that we also make +use of Flower Datasets here to load the test split of the MNIST dataset for server-side +evaluation. .. code-block:: python @@ -210,7 +225,13 @@ Note that we also make use of Flower Datasets here to load the test split of the return evaluate -The :code:`main` contains the server-side parameter initialization :code:`utils.set_initial_params()` as well as the aggregation strategy :code:`fl.server.strategy:FedAvg()`. The strategy is the default one, federated averaging (or FedAvg), with two clients and evaluation after each federated learning round. 
The server can be started with the command :code:`fl.server.start_server(server_address="0.0.0.0:8080", strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`. +The ``main`` contains the server-side parameter initialization +``utils.set_initial_params()`` as well as the aggregation strategy +``fl.server.strategy:FedAvg()``. The strategy is the default one, federated averaging +(or FedAvg), with two clients and evaluation after each federated learning round. The +server can be started with the command +``fl.server.start_server(server_address="0.0.0.0:8080", strategy=strategy, +config=fl.server.ServerConfig(num_rounds=3))``. .. code-block:: python @@ -223,21 +244,25 @@ The :code:`main` contains the server-side parameter initialization :code:`utils. evaluate_fn=get_evaluate_fn(model), on_fit_config_fn=fit_round, ) - fl.server.start_server(server_address="0.0.0.0:8080", strategy=strategy, config=fl.server.ServerConfig(num_rounds=3)) - + fl.server.start_server( + server_address="0.0.0.0:8080", + strategy=strategy, + config=fl.server.ServerConfig(num_rounds=3), + ) Train the model, federated! --------------------------- -With both client and server ready, we can now run everything and see federated -learning in action. Federated learning systems usually have a server and multiple clients. We, therefore, have to start the server first: +With both client and server ready, we can now run everything and see federated learning +in action. Federated learning systems usually have a server and multiple clients. We, +therefore, have to start the server first: .. code-block:: shell $ python3 server.py -Once the server is running we can start the clients in different terminals. -Open a new terminal and start the first client: +Once the server is running we can start the clients in different terminals. Open a new +terminal and start the first client: .. 
code-block:: shell @@ -249,8 +274,8 @@ Open another terminal and start the second client: $ python3 client.py -Each client will have its own dataset. -You should now see how the training does in the very first terminal (the one that started the server): +Each client will have its own dataset. You should now see how the training does in the +very first terminal (the one that started the server): .. code-block:: shell @@ -283,6 +308,7 @@ You should now see how the training does in the very first terminal (the one tha INFO flower 2022-01-13 13:43:21,232 | app.py:122 | app_evaluate: results [('ipv4:127.0.0.1:53980', EvaluateRes(loss=0.5843629240989685, num_examples=10000, accuracy=0.0, metrics={'accuracy': 0.8217})), ('ipv4:127.0.0.1:53982', EvaluateRes(loss=0.5843629240989685, num_examples=10000, accuracy=0.0, metrics={'accuracy': 0.8217}))] INFO flower 2022-01-13 13:43:21,232 | app.py:127 | app_evaluate: failures [] -Congratulations! -You've successfully built and run your first federated learning system. -The full `source code `_ for this example can be found in :code:`examples/sklearn-logreg-mnist`. +Congratulations! You've successfully built and run your first federated learning system. +The full `source code +`_ for this +example can be found in ``examples/sklearn-logreg-mnist``. diff --git a/doc/source/tutorial-quickstart-tensorflow.rst b/doc/source/tutorial-quickstart-tensorflow.rst index ffcd9efeb9bc..66cf69de6390 100644 --- a/doc/source/tutorial-quickstart-tensorflow.rst +++ b/doc/source/tutorial-quickstart-tensorflow.rst @@ -1,307 +1,290 @@ .. _quickstart-tensorflow: -####################### - Quickstart TensorFlow -####################### - -In this tutorial we will learn how to train a Convolutional Neural -Network on CIFAR-10 using the Flower framework and TensorFlow. 
First of -all, it is recommended to create a virtual environment and run -everything within a :doc:`virtualenv +Quickstart TensorFlow +===================== + +In this tutorial we will learn how to train a Convolutional Neural Network on CIFAR-10 +using the Flower framework and TensorFlow. First of all, it is recommended to create a +virtual environment and run everything within a :doc:`virtualenv `. -Let's use `flwr new` to create a complete Flower+TensorFlow project. It -will generate all the files needed to run, by default with the Flower -Simulation Engine, a federation of 10 nodes using `FedAvg +Let's use `flwr new` to create a complete Flower+TensorFlow project. It will generate +all the files needed to run, by default with the Flower Simulation Engine, a federation +of 10 nodes using `FedAvg `_. The dataset will be partitioned using Flower Dataset's `IidPartitioner `_. -Now that we have a rough idea of what this example is about, let's get -started. First, install Flower in your new environment: +Now that we have a rough idea of what this example is about, let's get started. First, +install Flower in your new environment: -.. code:: shell +.. code-block:: shell - # In a new Python environment - $ pip install flwr + # In a new Python environment + $ pip install flwr -Then, run the command below. You will be prompted to select one of the -available templates (choose ``TensorFlow``), give a name to your -project, and type in your developer name: +Then, run the command below. You will be prompted to select one of the available +templates (choose ``TensorFlow``), give a name to your project, and type in your +developer name: -.. code:: shell +.. code-block:: shell - $ flwr new + $ flwr new -After running it you'll notice a new directory with your project name -has been created. It should have the following structure: +After running it you'll notice a new directory with your project name has been created. +It should have the following structure: -.. 
code:: shell +.. code-block:: shell - - ├── - │ ├── __init__.py - │ ├── client_app.py # Defines your ClientApp - │ ├── server_app.py # Defines your ServerApp - │ └── task.py # Defines your model, training and data loading - ├── pyproject.toml # Project metadata like dependencies and configs - └── README.md + + ├── + │ ├── __init__.py + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md -If you haven't yet installed the project and its dependencies, you can -do so by: +If you haven't yet installed the project and its dependencies, you can do so by: -.. code:: shell +.. code-block:: shell - # From the directory where your pyproject.toml is - $ pip install -e . + # From the directory where your pyproject.toml is + $ pip install -e . To run the project, do: -.. code:: shell +.. code-block:: shell - # Run with default arguments - $ flwr run . + # Run with default arguments + $ flwr run . With default arguments you will see an output like this one: -.. code:: shell - - Loading project configuration... 
- Success - INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout - INFO : - INFO : [INIT] - INFO : Using initial global parameters provided by strategy - INFO : Starting evaluation of initial global parameters - INFO : Evaluation returned no results (`None`) - INFO : - INFO : [ROUND 1] - INFO : configure_fit: strategy sampled 10 clients (out of 10) - INFO : aggregate_fit: received 10 results and 0 failures - WARNING : No fit_metrics_aggregation_fn provided - INFO : configure_evaluate: strategy sampled 10 clients (out of 10) - INFO : aggregate_evaluate: received 10 results and 0 failures - WARNING : No evaluate_metrics_aggregation_fn provided - INFO : - INFO : [ROUND 2] - INFO : configure_fit: strategy sampled 10 clients (out of 10) - INFO : aggregate_fit: received 10 results and 0 failures - INFO : configure_evaluate: strategy sampled 10 clients (out of 10) - INFO : aggregate_evaluate: received 10 results and 0 failures - INFO : - INFO : [ROUND 3] - INFO : configure_fit: strategy sampled 10 clients (out of 10) - INFO : aggregate_fit: received 10 results and 0 failures - INFO : configure_evaluate: strategy sampled 10 clients (out of 10) - INFO : aggregate_evaluate: received 10 results and 0 failures - INFO : - INFO : [SUMMARY] - INFO : Run finished 3 round(s) in 31.31s - INFO : History (loss, distributed): - INFO : round 1: 1.9066195368766785 - INFO : round 2: 1.657227087020874 - INFO : round 3: 1.559039831161499 - INFO : - -You can also override the parameters defined in the -``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this: - -.. code:: shell - - # Override some arguments - $ flwr run . --run-config "num-server-rounds=5 batch-size=16" - -********** - The Data -********** - -This tutorial uses `Flower Datasets `_ -to easily download and partition the `CIFAR-10` dataset. In this example -you'll make use of the `IidPartitioner +.. code-block:: shell + + Loading project configuration... 
+ Success + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Using initial global parameters provided by strategy + INFO : Starting evaluation of initial global parameters + INFO : Evaluation returned no results (`None`) + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + WARNING : No fit_metrics_aggregation_fn provided + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + WARNING : No evaluate_metrics_aggregation_fn provided + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 31.31s + INFO : History (loss, distributed): + INFO : round 1: 1.9066195368766785 + INFO : round 2: 1.657227087020874 + INFO : round 3: 1.559039831161499 + INFO : + +You can also override the parameters defined in the ``[tool.flwr.app.config]`` section +in ``pyproject.toml`` like this: + +.. code-block:: shell + + # Override some arguments + $ flwr run . --run-config "num-server-rounds=5 batch-size=16" + +The Data +-------- + +This tutorial uses `Flower Datasets `_ to easily +download and partition the `CIFAR-10` dataset. In this example you'll make use of the +`IidPartitioner `_ -to generate `num_partitions` partitions. 
You can choose `other -partitioners -`_ -available in Flower Datasets. Each ``ClientApp`` will call this function -to create the ``NumPy`` arrays that correspond to their data partition. - -.. code:: python - - partitioner = IidPartitioner(num_partitions=num_partitions) - fds = FederatedDataset( - dataset="uoft-cs/cifar10", - partitioners={"train": partitioner}, - ) - partition = fds.load_partition(partition_id, "train") - partition.set_format("numpy") - - # Divide data on each node: 80% train, 20% test - partition = partition.train_test_split(test_size=0.2) - x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"] - x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"] - -*********** - The Model -*********** - -Next, we need a model. We defined a simple Convolutional Neural Network -(CNN), but feel free to replace it with a more sophisticated model if -you'd like: - -.. code:: python - - def load_model(learning_rate: float = 0.001): - # Define a simple CNN for CIFAR-10 and set Adam optimizer - model = keras.Sequential( - [ - keras.Input(shape=(32, 32, 3)), - layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), - layers.MaxPooling2D(pool_size=(2, 2)), - layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), - layers.MaxPooling2D(pool_size=(2, 2)), - layers.Flatten(), - layers.Dropout(0.5), - layers.Dense(10, activation="softmax"), - ] - ) - model.compile( - "adam", - loss="sparse_categorical_crossentropy", - metrics=["accuracy"], - ) - return model - -*************** - The ClientApp -*************** - -With `TensorFlow`, we can use the built-in ``get_weights()`` and -``set_weights()`` functions, which simplifies the implementation with -`Flower`. The rest of the functionality in the ClientApp is directly -inspired by the centralized case. The ``fit()`` method in the client -trains the model using the local dataset. 
Similarly, the ``evaluate()`` -method is used to evaluate the model received on a held-out validation +to generate `num_partitions` partitions. You can choose `other partitioners +`_ available in +Flower Datasets. Each ``ClientApp`` will call this function to create the ``NumPy`` +arrays that correspond to their data partition. + +.. code-block:: python + + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="uoft-cs/cifar10", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id, "train") + partition.set_format("numpy") + + # Divide data on each node: 80% train, 20% test + partition = partition.train_test_split(test_size=0.2) + x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"] + x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"] + +The Model +--------- + +Next, we need a model. We defined a simple Convolutional Neural Network (CNN), but feel +free to replace it with a more sophisticated model if you'd like: + +.. code-block:: python + + def load_model(learning_rate: float = 0.001): + # Define a simple CNN for CIFAR-10 and set Adam optimizer + model = keras.Sequential( + [ + keras.Input(shape=(32, 32, 3)), + layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Flatten(), + layers.Dropout(0.5), + layers.Dense(10, activation="softmax"), + ] + ) + model.compile( + "adam", + loss="sparse_categorical_crossentropy", + metrics=["accuracy"], + ) + return model + +The ClientApp +------------- + +With `TensorFlow`, we can use the built-in ``get_weights()`` and ``set_weights()`` +functions, which simplifies the implementation with `Flower`. The rest of the +functionality in the ClientApp is directly inspired by the centralized case. 
The +``fit()`` method in the client trains the model using the local dataset. Similarly, the +``evaluate()`` method is used to evaluate the model received on a held-out validation set that the client might have: -.. code:: python - - class FlowerClient(NumPyClient): - def __init__(self, model, data, epochs, batch_size, verbose): - self.model = model - self.x_train, self.y_train, self.x_test, self.y_test = data - self.epochs = epochs - self.batch_size = batch_size - self.verbose = verbose - - def fit(self, parameters, config): - self.model.set_weights(parameters) - self.model.fit( - self.x_train, - self.y_train, - epochs=self.epochs, - batch_size=self.batch_size, - verbose=self.verbose, - ) - return self.model.get_weights(), len(self.x_train), {} - - def evaluate(self, parameters, config): - self.model.set_weights(parameters) - loss, accuracy = self.model.evaluate(self.x_test, self.y_test, verbose=0) - return loss, len(self.x_test), {"accuracy": accuracy} - -Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` -defined above by means of a ``client_fn()`` callback. Note that the -`context` enables you to get access to hyperparameters defined in your -``pyproject.toml`` to configure the run. For example, in this tutorial -we access the `local-epochs` setting to control the number of epochs a -``ClientApp`` will perform when running the ``fit()`` method, in -addition to `batch-size`. You could define additional hyperparameters in -``pyproject.toml`` and access them here. - -.. 
code:: python - - def client_fn(context: Context): - # Load model and data - net = load_model() - - partition_id = context.node_config["partition-id"] - num_partitions = context.node_config["num-partitions"] - data = load_data(partition_id, num_partitions) - epochs = context.run_config["local-epochs"] - batch_size = context.run_config["batch-size"] - verbose = context.run_config.get("verbose") - - # Return Client instance - return FlowerClient( - net, data, epochs, batch_size, verbose - ).to_client() - - - # Flower ClientApp - app = ClientApp(client_fn=client_fn) - -*************** - The ServerApp -*************** - -To construct a ``ServerApp`` we define a ``server_fn()`` callback with -an identical signature to that of ``client_fn()`` but the return type is -`ServerAppComponents +.. code-block:: python + + class FlowerClient(NumPyClient): + def __init__(self, model, data, epochs, batch_size, verbose): + self.model = model + self.x_train, self.y_train, self.x_test, self.y_test = data + self.epochs = epochs + self.batch_size = batch_size + self.verbose = verbose + + def fit(self, parameters, config): + self.model.set_weights(parameters) + self.model.fit( + self.x_train, + self.y_train, + epochs=self.epochs, + batch_size=self.batch_size, + verbose=self.verbose, + ) + return self.model.get_weights(), len(self.x_train), {} + + def evaluate(self, parameters, config): + self.model.set_weights(parameters) + loss, accuracy = self.model.evaluate(self.x_test, self.y_test, verbose=0) + return loss, len(self.x_test), {"accuracy": accuracy} + +Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` defined above by +means of a ``client_fn()`` callback. Note that the `context` enables you to get access +to hyperparameters defined in your ``pyproject.toml`` to configure the run. 
For example, +in this tutorial we access the `local-epochs` setting to control the number of epochs a +``ClientApp`` will perform when running the ``fit()`` method, in addition to +`batch-size`. You could define additional hyperparameters in ``pyproject.toml`` and +access them here. + +.. code-block:: python + + def client_fn(context: Context): + # Load model and data + net = load_model() + + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + data = load_data(partition_id, num_partitions) + epochs = context.run_config["local-epochs"] + batch_size = context.run_config["batch-size"] + verbose = context.run_config.get("verbose") + + # Return Client instance + return FlowerClient(net, data, epochs, batch_size, verbose).to_client() + + + # Flower ClientApp + app = ClientApp(client_fn=client_fn) + +The ServerApp +------------- + +To construct a ``ServerApp`` we define a ``server_fn()`` callback with an identical +signature to that of ``client_fn()`` but the return type is `ServerAppComponents `_ as opposed to a `Client -`_. -In this example we use the `FedAvg`. To it we pass a randomly -initialized model that will serve as the global model to federate. +`_. In this +example we use the `FedAvg`. To it we pass a randomly initialized model that will serve +as the global model to federate. -.. code:: python +.. 
code-block:: python
 
-   def server_fn(context: Context):
-       # Read from config
-       num_rounds = context.run_config["num-server-rounds"]
+    def server_fn(context: Context):
+        # Read from config
+        num_rounds = context.run_config["num-server-rounds"]
 
-       # Get parameters to initialize global model
-       parameters = ndarrays_to_parameters(load_model().get_weights())
+        # Get parameters to initialize global model
+        parameters = ndarrays_to_parameters(load_model().get_weights())
 
-       # Define strategy
-       strategy = strategy = FedAvg(
-           fraction_fit=1.0,
-           fraction_evaluate=1.0,
-           min_available_clients=2,
-           initial_parameters=parameters,
-       )
-       config = ServerConfig(num_rounds=num_rounds)
+        # Define strategy
+        strategy = FedAvg(
+            fraction_fit=1.0,
+            fraction_evaluate=1.0,
+            min_available_clients=2,
+            initial_parameters=parameters,
+        )
+        config = ServerConfig(num_rounds=num_rounds)
 
-       return ServerAppComponents(strategy=strategy, config=config)
+        return ServerAppComponents(strategy=strategy, config=config)
 
-   # Create ServerApp
-   app = ServerApp(server_fn=server_fn)
-Congratulations! You've successfully built and run your first federated
-learning system.
+    # Create ServerApp
+    app = ServerApp(server_fn=server_fn)
 
-.. note::
+Congratulations! You've successfully built and run your first federated learning system.
 
-   Check the source code of the extended version of this tutorial in
-   |quickstart_tf_link|_ in the Flower GitHub repository.
+.. note::
 
-.. |quickstart_tf_link| replace::
+    Check the source code of the extended version of this tutorial in
+    |quickstart_tf_link|_ in the Flower GitHub repository.
 
-   :code:`examples/quickstart-tensorflow`
+.. |quickstart_tf_link| replace:: ``examples/quickstart-tensorflow``
 
 .. _quickstart_tf_link: https://github.com/adap/flower/blob/main/examples/quickstart-tensorflow
 
-****************
- Video tutorial
-****************
+Video tutorial
+--------------
 
 .. 
note:: - The video shown below shows how to setup a TensorFlow + Flower - project using our previously recommended APIs. A new video tutorial - will be released that shows the new APIs (as the content above does) + The video shown below shows how to setup a TensorFlow + Flower project using our + previously recommended APIs. A new video tutorial will be released that shows the + new APIs (as the content above does) .. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with TensorFlow to train a CNN model on CIFAR-10. + :description: Check out this Federated Learning quickstart tutorial for using Flower with TensorFlow to train a CNN model on CIFAR-10. .. youtube:: FGTc2TQq7VM - :width: 100% + :width: 100% diff --git a/doc/source/tutorial-quickstart-xgboost.rst b/doc/source/tutorial-quickstart-xgboost.rst index 34ad5f6e99c0..fe15227fdf11 100644 --- a/doc/source/tutorial-quickstart-xgboost.rst +++ b/doc/source/tutorial-quickstart-xgboost.rst @@ -1,63 +1,75 @@ .. _quickstart-xgboost: - Quickstart XGBoost -===================== +================== .. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with XGBoost to train classification models on trees. + :description: Check out this Federated Learning quickstart tutorial for using Flower with XGBoost to train classification models on trees. -.. youtube:: AY1vpXUpesc - :width: 100% +.. youtube:: AY1vpXUpesc + :width: 100% Federated XGBoost -------------------- +----------------- -EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient implementation of gradient-boosted decision tree (**GBDT**), that maximises the computational boundaries for boosted tree methods. -It's primarily designed to enhance both the performance and computational speed of machine learning models. -In XGBoost, trees are constructed concurrently, unlike the sequential approach taken by GBDT. 
+EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient implementation of +gradient-boosted decision tree (**GBDT**), that maximises the computational boundaries +for boosted tree methods. It's primarily designed to enhance both the performance and +computational speed of machine learning models. In XGBoost, trees are constructed +concurrently, unlike the sequential approach taken by GBDT. -Often, for tabular data on medium-sized datasets with fewer than 10k training examples, XGBoost surpasses the results of deep learning techniques. +Often, for tabular data on medium-sized datasets with fewer than 10k training examples, +XGBoost surpasses the results of deep learning techniques. Why federated XGBoost? -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Indeed, as the demand for data privacy and decentralized learning grows, there's an increasing requirement to implement federated XGBoost systems for specialised applications, like survival analysis and financial fraud detection. +~~~~~~~~~~~~~~~~~~~~~~ -Federated learning ensures that raw data remains on the local device, making it an attractive approach for sensitive domains where data security and privacy are paramount. -Given the robustness and efficiency of XGBoost, combining it with federated learning offers a promising solution for these specific challenges. +Indeed, as the demand for data privacy and decentralized learning grows, there's an +increasing requirement to implement federated XGBoost systems for specialised +applications, like survival analysis and financial fraud detection. -In this tutorial we will learn how to train a federated XGBoost model on HIGGS dataset using Flower and :code:`xgboost` package. -We use a simple example (`full code xgboost-quickstart `_) with two *clients* and one *server* -to demonstrate how federated XGBoost works, -and then we dive into a more complex example (`full code xgboost-comprehensive `_) to run various experiments. 
+Federated learning ensures that raw data remains on the local device, making it an +attractive approach for sensitive domains where data security and privacy are paramount. +Given the robustness and efficiency of XGBoost, combining it with federated learning +offers a promising solution for these specific challenges. +In this tutorial we will learn how to train a federated XGBoost model on HIGGS dataset +using Flower and ``xgboost`` package. We use a simple example (`full code +xgboost-quickstart +`_) with two +*clients* and one *server* to demonstrate how federated XGBoost works, and then we dive +into a more complex example (`full code xgboost-comprehensive +`_) to run +various experiments. Environment Setup --------------------- +----------------- -First of all, it is recommended to create a virtual environment and run everything within a :doc:`virtualenv `. +First of all, it is recommended to create a virtual environment and run everything +within a :doc:`virtualenv `. We first need to install Flower and Flower Datasets. You can do this by running : .. code-block:: shell - $ pip install flwr flwr-datasets + $ pip install flwr flwr-datasets -Since we want to use :code:`xgboost` package to build up XGBoost trees, let's go ahead and install :code:`xgboost`: +Since we want to use ``xgboost`` package to build up XGBoost trees, let's go ahead and +install ``xgboost``: .. code-block:: shell - $ pip install xgboost - + $ pip install xgboost Flower Client ------------------ +------------- -*Clients* are responsible for generating individual weight-updates for the model based on their local datasets. -Now that we have all our dependencies installed, let's run a simple distributed training with two clients and one server. +*Clients* are responsible for generating individual weight-updates for the model based +on their local datasets. Now that we have all our dependencies installed, let's run a +simple distributed training with two clients and one server. 
-In a file called :code:`client.py`, import xgboost, Flower, Flower Datasets and other related functions: +In a file called ``client.py``, import xgboost, Flower, Flower Datasets and other +related functions: .. code-block:: python @@ -84,9 +96,10 @@ In a file called :code:`client.py`, import xgboost, Flower, Flower Datasets and from flwr_datasets.partitioner import IidPartitioner Dataset partition and hyper-parameter selection -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Prior to local training, we require loading the HIGGS dataset from Flower Datasets and conduct data partitioning for FL: +Prior to local training, we require loading the HIGGS dataset from Flower Datasets and +conduct data partitioning for FL: .. code-block:: python @@ -99,8 +112,9 @@ Prior to local training, we require loading the HIGGS dataset from Flower Datase partition = fds.load_partition(partition_id=args.partition_id, split="train") partition.set_format("numpy") -In this example, we split the dataset into 30 partitions with uniform distribution (:code:`IidPartitioner(num_partitions=30)`). -Then, we load the partition for the given client based on :code:`partition_id`: +In this example, we split the dataset into 30 partitions with uniform distribution +(``IidPartitioner(num_partitions=30)``). Then, we load the partition for the given +client based on ``partition_id``: .. code-block:: python @@ -118,7 +132,8 @@ Then, we load the partition for the given client based on :code:`partition_id`: partition = fds.load_partition(idx=args.partition_id, split="train") partition.set_format("numpy") -After that, we do train/test splitting on the given partition (client's local data), and transform data format for :code:`xgboost` package. +After that, we do train/test splitting on the given partition (client's local data), and +transform data format for ``xgboost`` package. .. 
code-block:: python @@ -131,7 +146,8 @@ After that, we do train/test splitting on the given partition (client's local da train_dmatrix = transform_dataset_to_dmatrix(train_data) valid_dmatrix = transform_dataset_to_dmatrix(valid_data) -The functions of :code:`train_test_split` and :code:`transform_dataset_to_dmatrix` are defined as below: +The functions of ``train_test_split`` and ``transform_dataset_to_dmatrix`` are defined +as below: .. code-block:: python @@ -171,40 +187,39 @@ Finally, we define the hyper-parameters used for XGBoost training. "tree_method": "hist", } -The :code:`num_local_round` represents the number of iterations for local tree boost. -We use CPU for the training in default. -One can shift it to GPU by setting :code:`tree_method` to :code:`gpu_hist`. -We use AUC as evaluation metric. - +The ``num_local_round`` represents the number of iterations for local tree boost. We use +CPU for the training in default. One can shift it to GPU by setting ``tree_method`` to +``gpu_hist``. We use AUC as evaluation metric. Flower client definition for XGBoost -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -After loading the dataset we define the Flower client. -We follow the general rule to define :code:`XgbClient` class inherited from :code:`fl.client.Client`. +After loading the dataset we define the Flower client. We follow the general rule to +define ``XgbClient`` class inherited from ``fl.client.Client``. .. code-block:: python class XgbClient(fl.client.Client): - def __init__( - self, - train_dmatrix, - valid_dmatrix, - num_train, - num_val, - num_local_round, - params, - ): - self.train_dmatrix = train_dmatrix - self.valid_dmatrix = valid_dmatrix - self.num_train = num_train - self.num_val = num_val - self.num_local_round = num_local_round - self.params = params - -All required parameters defined above are passed to :code:`XgbClient`'s constructor. 
-
-Then, we override :code:`get_parameters`, :code:`fit` and :code:`evaluate` methods insides :code:`XgbClient` class as follows.
+    def __init__(
+        self,
+        train_dmatrix,
+        valid_dmatrix,
+        num_train,
+        num_val,
+        num_local_round,
+        params,
+    ):
+        self.train_dmatrix = train_dmatrix
+        self.valid_dmatrix = valid_dmatrix
+        self.num_train = num_train
+        self.num_val = num_val
+        self.num_local_round = num_local_round
+        self.params = params
+
+All required parameters defined above are passed to ``XgbClient``'s constructor.
+
+Then, we override ``get_parameters``, ``fit`` and ``evaluate`` methods inside the
+``XgbClient`` class as follows.
 
 .. code-block:: python
 
@@ -218,9 +233,10 @@ Then, we override :code:`get_parameters`, :code:`fit` and :code:`evaluate` metho
             parameters=Parameters(tensor_type="", tensors=[]),
         )
 
-Unlike neural network training, XGBoost trees are not started from a specified random weights.
-In this case, we do not use :code:`get_parameters` and :code:`set_parameters` to initialise model parameters for XGBoost.
-As a result, let's return an empty tensor in :code:`get_parameters` when it is called by the server at the first round.
+Unlike neural network training, XGBoost trees are not started from a specified random
+weights. In this case, we do not use ``get_parameters`` and ``set_parameters`` to
+initialise model parameters for XGBoost. As a result, let's return an empty tensor in
+``get_parameters`` when it is called by the server at the first round.
 
 .. code-block:: python
 
@@ -259,9 +275,10 @@ As a result, let's return an empty tensor in :code:`get_parameters` when it is c
             metrics={},
         )
 
-In :code:`fit`, at the first round, we call :code:`xgb.train()` to build up the first set of trees. 
-From the second round, we load the global model sent from server to new build Booster object, -and then update model weights on local training data with function :code:`local_boost` as follows: +In ``fit``, at the first round, we call ``xgb.train()`` to build up the first set of +trees. From the second round, we load the global model sent from server to new build +Booster object, and then update model weights on local training data with function +``local_boost`` as follows: .. code-block:: python @@ -278,8 +295,8 @@ and then update model weights on local training data with function :code:`local_ return bst -Given :code:`num_local_round`, we update trees by calling :code:`bst_input.update` method. -After training, the last :code:`N=num_local_round` trees will be extracted to send to the server. +Given ``num_local_round``, we update trees by calling ``bst_input.update`` method. After +training, the last ``N=num_local_round`` trees will be extracted to send to the server. .. code-block:: python @@ -310,40 +327,42 @@ After training, the last :code:`N=num_local_round` trees will be extracted to se metrics={"AUC": auc}, ) -In :code:`evaluate`, after loading the global model, we call :code:`bst.eval_set` function to conduct evaluation on valid set. -The AUC value will be returned. +In ``evaluate``, after loading the global model, we call ``bst.eval_set`` function to +conduct evaluation on valid set. The AUC value will be returned. -Now, we can create an instance of our class :code:`XgbClient` and add one line to actually run this client: +Now, we can create an instance of our class ``XgbClient`` and add one line to actually +run this client: .. code-block:: python - fl.client.start_client( - server_address="127.0.0.1:8080", - client=XgbClient( - train_dmatrix, - valid_dmatrix, - num_train, - num_val, - num_local_round, - params, - ).to_client(), - ) - -That's it for the client. We only have to implement :code:`Client` and call :code:`fl.client.start_client()`. 
-The string :code:`"[::]:8080"` tells the client which server to connect to. -In our case we can run the server and the client on the same machine, therefore we use -:code:`"[::]:8080"`. If we run a truly federated workload with the server and -clients running on different machines, all that needs to change is the -:code:`server_address` we point the client at. + fl.client.start_client( + server_address="127.0.0.1:8080", + client=XgbClient( + train_dmatrix, + valid_dmatrix, + num_train, + num_val, + num_local_round, + params, + ).to_client(), + ) +That's it for the client. We only have to implement ``Client`` and call +``fl.client.start_client()``. The string ``"[::]:8080"`` tells the client which server +to connect to. In our case we can run the server and the client on the same machine, +therefore we use ``"[::]:8080"``. If we run a truly federated workload with the server +and clients running on different machines, all that needs to change is the +``server_address`` we point the client at. Flower Server ------------------- +------------- -These updates are then sent to the *server* which will aggregate them to produce a better model. -Finally, the *server* sends this improved version of the model back to each *client* to finish a complete FL round. +These updates are then sent to the *server* which will aggregate them to produce a +better model. Finally, the *server* sends this improved version of the model back to +each *client* to finish a complete FL round. -In a file named :code:`server.py`, import Flower and FedXgbBagging from :code:`flwr.server.strategy`. +In a file named ``server.py``, import Flower and FedXgbBagging from +``flwr.server.strategy``. We first define a strategy for XGBoost bagging aggregation. @@ -361,6 +380,7 @@ We first define a strategy for XGBoost bagging aggregation. 
on_fit_config_fn=config_func,
     )
 
+
 def evaluate_metrics_aggregation(eval_metrics):
     """Return an aggregated metric (AUC) for evaluation."""
     total_num = sum([num for num, _ in eval_metrics])
@@ -370,6 +390,7 @@ We first define a strategy for XGBoost bagging aggregation.
     metrics_aggregated = {"AUC": auc_aggregated}
     return metrics_aggregated
 
+
 def config_func(rnd: int) -> Dict[str, str]:
     """Return a configuration with global epochs."""
     config = {
@@ -377,9 +398,10 @@ We first define a strategy for XGBoost bagging aggregation.
     }
     return config
 
-We use two clients for this example.
-An :code:`evaluate_metrics_aggregation` function is defined to collect and wighted average the AUC values from clients.
-The :code:`config_func` function is to return the current FL round number to client's :code:`fit()` and :code:`evaluate()` methods.
+We use two clients for this example. An ``evaluate_metrics_aggregation`` function is
+defined to collect and weighted average the AUC values from clients. The ``config_func``
+function is to return the current FL round number to client's ``fit()`` and
+``evaluate()`` methods.
 
 Then, we start the server:
 
@@ -393,12 +415,13 @@ Then, we start the server:
     )
 
 Tree-based bagging aggregation
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 You must be curious about how bagging aggregation works. Let's look into the details.
 
-In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define :code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`.
-Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` and :code:`evaluate` methods as follows:
+In file ``flwr.server.strategy.fedxgb_bagging.py``, we define ``FedXgbBagging``
+inherited from ``flwr.server.strategy.FedAvg``. Then, we override the ``aggregate_fit``,
+``aggregate_evaluate`` and ``evaluate`` methods as follows:
 
 .. 
code-block:: python @@ -493,7 +516,8 @@ Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` and :cod loss, metrics = eval_res return loss, metrics -In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost trees by calling :code:`aggregate()` function: +In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost trees by calling +``aggregate()`` function: .. code-block:: python @@ -552,28 +576,27 @@ In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost trees b ) return tree_num, paral_tree_num -In this function, we first fetch the number of trees and the number of parallel trees for the current and previous model -by calling :code:`_get_tree_nums`. -Then, the fetched information will be aggregated. -After that, the trees (containing model weights) are aggregated to generate a new tree model. - -After traversal of all clients' models, a new global model is generated, -followed by the serialisation, and sending back to each client. +In this function, we first fetch the number of trees and the number of parallel trees +for the current and previous model by calling ``_get_tree_nums``. Then, the fetched +information will be aggregated. After that, the trees (containing model weights) are +aggregated to generate a new tree model. +After traversal of all clients' models, a new global model is generated, followed by the +serialisation, and sending back to each client. Launch Federated XGBoost! -------------------------------- +------------------------- -With both client and server ready, we can now run everything and see federated -learning in action. FL systems usually have a server and multiple clients. We -therefore have to start the server first: +With both client and server ready, we can now run everything and see federated learning +in action. FL systems usually have a server and multiple clients. We therefore have to +start the server first: .. 
code-block:: shell $ python3 server.py -Once the server is running we can start the clients in different terminals. -Open a new terminal and start the first client: +Once the server is running we can start the clients in different terminals. Open a new +terminal and start the first client: .. code-block:: shell @@ -585,8 +608,8 @@ Open another terminal and start the second client: $ python3 client.py --partition-id=1 -Each client will have its own dataset. -You should now see how the training does in the very first terminal (the one that started the server): +Each client will have its own dataset. You should now see how the training does in the +very first terminal (the one that started the server): .. code-block:: shell @@ -629,192 +652,197 @@ You should now see how the training does in the very first terminal (the one tha INFO : INFO : [SUMMARY] INFO : Run finished 5 round(s) in 1.67s - INFO : History (loss, distributed): - INFO : round 1: 0 - INFO : round 2: 0 - INFO : round 3: 0 - INFO : round 4: 0 - INFO : round 5: 0 - INFO : History (metrics, distributed, evaluate): - INFO : {'AUC': [(1, 0.76755), (2, 0.775), (3, 0.77935), (4, 0.7836), (5, 0.7872)]} - -Congratulations! -You've successfully built and run your first federated XGBoost system. -The AUC values can be checked in :code:`metrics_distributed`. -One can see that the average AUC increases over FL rounds. - -The full `source code `_ for this example can be found in :code:`examples/xgboost-quickstart`. - + INFO : History (loss, distributed): + INFO : round 1: 0 + INFO : round 2: 0 + INFO : round 3: 0 + INFO : round 4: 0 + INFO : round 5: 0 + INFO : History (metrics, distributed, evaluate): + INFO : {'AUC': [(1, 0.76755), (2, 0.775), (3, 0.77935), (4, 0.7836), (5, 0.7872)]} + +Congratulations! You've successfully built and run your first federated XGBoost system. +The AUC values can be checked in ``metrics_distributed``. One can see that the average +AUC increases over FL rounds. 
+ +The full `source code +`_ for this +example can be found in ``examples/xgboost-quickstart``. Comprehensive Federated XGBoost ------------------------------------ +------------------------------- -Now that you have known how federated XGBoost work with Flower, it's time to run some more comprehensive experiments by customising the experimental settings. -In the xgboost-comprehensive example (`full code `_), -we provide more options to define various experimental setups, including aggregation strategies, data partitioning and centralised/distributed evaluation. -We also support :doc:`Flower simulation ` making it easy to simulate large client cohorts in a resource-aware manner. -Let's take a look! +Now that you have known how federated XGBoost work with Flower, it's time to run some +more comprehensive experiments by customising the experimental settings. In the +xgboost-comprehensive example (`full code +`_), we provide +more options to define various experimental setups, including aggregation strategies, +data partitioning and centralised/distributed evaluation. We also support :doc:`Flower +simulation ` making it easy to simulate large client cohorts in +a resource-aware manner. Let's take a look! Cyclic training -~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~ -In addition to bagging aggregation, we offer a cyclic training scheme, which performs FL in a client-by-client fashion. -Instead of aggregating multiple clients, there is only one single client participating in the training per round in the cyclic training scenario. -The trained local XGBoost trees will be passed to the next client as an initialised model for next round's boosting. +In addition to bagging aggregation, we offer a cyclic training scheme, which performs FL +in a client-by-client fashion. Instead of aggregating multiple clients, there is only +one single client participating in the training per round in the cyclic training +scenario. 
The trained local XGBoost trees will be passed to the next client as an +initialised model for next round's boosting. -To do this, we first customise a :code:`ClientManager` in :code:`server_utils.py`: +To do this, we first customise a ``ClientManager`` in ``server_utils.py``: .. code-block:: python - class CyclicClientManager(SimpleClientManager): - """Provides a cyclic client selection rule.""" - - def sample( - self, - num_clients: int, - min_num_clients: Optional[int] = None, - criterion: Optional[Criterion] = None, - ) -> List[ClientProxy]: - """Sample a number of Flower ClientProxy instances.""" - - # Block until at least num_clients are connected. - if min_num_clients is None: - min_num_clients = num_clients - self.wait_for(min_num_clients) - - # Sample clients which meet the criterion - available_cids = list(self.clients) - if criterion is not None: - available_cids = [ - cid for cid in available_cids if criterion.select(self.clients[cid]) - ] - - if num_clients > len(available_cids): - log( - INFO, - "Sampling failed: number of available clients" - " (%s) is less than number of requested clients (%s).", - len(available_cids), - num_clients, - ) - return [] - - # Return all available clients - return [self.clients[cid] for cid in available_cids] - -The customised :code:`ClientManager` samples all available clients in each FL round based on the order of connection to the server. -Then, we define a new strategy :code:`FedXgbCyclic` in :code:`flwr.server.strategy.fedxgb_cyclic.py`, -in order to sequentially select only one client in given round and pass the received model to next client. + class CyclicClientManager(SimpleClientManager): + """Provides a cyclic client selection rule.""" + + def sample( + self, + num_clients: int, + min_num_clients: Optional[int] = None, + criterion: Optional[Criterion] = None, + ) -> List[ClientProxy]: + """Sample a number of Flower ClientProxy instances.""" + + # Block until at least num_clients are connected. 
+ if min_num_clients is None: + min_num_clients = num_clients + self.wait_for(min_num_clients) + + # Sample clients which meet the criterion + available_cids = list(self.clients) + if criterion is not None: + available_cids = [ + cid for cid in available_cids if criterion.select(self.clients[cid]) + ] + + if num_clients > len(available_cids): + log( + INFO, + "Sampling failed: number of available clients" + " (%s) is less than number of requested clients (%s).", + len(available_cids), + num_clients, + ) + return [] + + # Return all available clients + return [self.clients[cid] for cid in available_cids] + +The customised ``ClientManager`` samples all available clients in each FL round based on +the order of connection to the server. Then, we define a new strategy ``FedXgbCyclic`` +in ``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially select only one +client in given round and pass the received model to next client. .. code-block:: python - class FedXgbCyclic(FedAvg): - """Configurable FedXgbCyclic strategy implementation.""" - - # pylint: disable=too-many-arguments,too-many-instance-attributes, line-too-long - def __init__( - self, - **kwargs: Any, - ): - self.global_model: Optional[bytes] = None - super().__init__(**kwargs) - - def aggregate_fit( - self, - server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: - """Aggregate fit results using bagging.""" - if not results: - return None, {} - # Do not aggregate if there are failures and failures are not accepted - if not self.accept_failures and failures: - return None, {} - - # Fetch the client model from last round as global model - for _, fit_res in results: - update = fit_res.parameters.tensors - for bst in update: - self.global_model = bst - - return ( - Parameters(tensor_type="", tensors=[cast(bytes, self.global_model)]), - {}, - ) - -Unlike the original 
:code:`FedAvg`, we don't perform aggregation here. -Instead, we just make a copy of the received client model as global model by overriding :code:`aggregate_fit`. - -Also, the customised :code:`configure_fit` and :code:`configure_evaluate` methods ensure the clients to be sequentially selected given FL round: + class FedXgbCyclic(FedAvg): + """Configurable FedXgbCyclic strategy implementation.""" + + # pylint: disable=too-many-arguments,too-many-instance-attributes, line-too-long + def __init__( + self, + **kwargs: Any, + ): + self.global_model: Optional[bytes] = None + super().__init__(**kwargs) + + def aggregate_fit( + self, + server_round: int, + results: List[Tuple[ClientProxy, FitRes]], + failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], + ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + """Aggregate fit results using bagging.""" + if not results: + return None, {} + # Do not aggregate if there are failures and failures are not accepted + if not self.accept_failures and failures: + return None, {} + + # Fetch the client model from last round as global model + for _, fit_res in results: + update = fit_res.parameters.tensors + for bst in update: + self.global_model = bst + + return ( + Parameters(tensor_type="", tensors=[cast(bytes, self.global_model)]), + {}, + ) + +Unlike the original ``FedAvg``, we don't perform aggregation here. Instead, we just make +a copy of the received client model as global model by overriding ``aggregate_fit``. + +Also, the customised ``configure_fit`` and ``configure_evaluate`` methods ensure the +clients to be sequentially selected given FL round: .. 
code-block:: python - def configure_fit( - self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: - """Configure the next round of training.""" - config = {} - if self.on_fit_config_fn is not None: - # Custom fit config function provided - config = self.on_fit_config_fn(server_round) - fit_ins = FitIns(parameters, config) - - # Sample clients - sample_size, min_num_clients = self.num_fit_clients( - client_manager.num_available() - ) - clients = client_manager.sample( - num_clients=sample_size, - min_num_clients=min_num_clients, - ) - - # Sample the clients sequentially given server_round - sampled_idx = (server_round - 1) % len(clients) - sampled_clients = [clients[sampled_idx]] - - # Return client/config pairs - return [(client, fit_ins) for client in sampled_clients] - - def configure_evaluate( - self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: - """Configure the next round of evaluation.""" - # Do not configure federated evaluation if fraction eval is 0. 
- if self.fraction_evaluate == 0.0: - return [] - - # Parameters and config - config = {} - if self.on_evaluate_config_fn is not None: - # Custom evaluation config function provided - config = self.on_evaluate_config_fn(server_round) - evaluate_ins = EvaluateIns(parameters, config) - - # Sample clients - sample_size, min_num_clients = self.num_evaluation_clients( - client_manager.num_available() - ) - clients = client_manager.sample( - num_clients=sample_size, - min_num_clients=min_num_clients, - ) - - # Sample the clients sequentially given server_round - sampled_idx = (server_round - 1) % len(clients) - sampled_clients = [clients[sampled_idx]] - - # Return client/config pairs - return [(client, evaluate_ins) for client in sampled_clients] + def configure_fit( + self, server_round: int, parameters: Parameters, client_manager: ClientManager + ) -> List[Tuple[ClientProxy, FitIns]]: + """Configure the next round of training.""" + config = {} + if self.on_fit_config_fn is not None: + # Custom fit config function provided + config = self.on_fit_config_fn(server_round) + fit_ins = FitIns(parameters, config) + + # Sample clients + sample_size, min_num_clients = self.num_fit_clients(client_manager.num_available()) + clients = client_manager.sample( + num_clients=sample_size, + min_num_clients=min_num_clients, + ) + + # Sample the clients sequentially given server_round + sampled_idx = (server_round - 1) % len(clients) + sampled_clients = [clients[sampled_idx]] + + # Return client/config pairs + return [(client, fit_ins) for client in sampled_clients] + def configure_evaluate( + self, server_round: int, parameters: Parameters, client_manager: ClientManager + ) -> List[Tuple[ClientProxy, EvaluateIns]]: + """Configure the next round of evaluation.""" + # Do not configure federated evaluation if fraction eval is 0. 
+ if self.fraction_evaluate == 0.0: + return [] + + # Parameters and config + config = {} + if self.on_evaluate_config_fn is not None: + # Custom evaluation config function provided + config = self.on_evaluate_config_fn(server_round) + evaluate_ins = EvaluateIns(parameters, config) + + # Sample clients + sample_size, min_num_clients = self.num_evaluation_clients( + client_manager.num_available() + ) + clients = client_manager.sample( + num_clients=sample_size, + min_num_clients=min_num_clients, + ) + + # Sample the clients sequentially given server_round + sampled_idx = (server_round - 1) % len(clients) + sampled_clients = [clients[sampled_idx]] + + # Return client/config pairs + return [(client, evaluate_ins) for client in sampled_clients] Customised data partitioning -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In :code:`dataset.py`, we have a function :code:`instantiate_partitioner` to instantiate the data partitioner -based on the given :code:`num_partitions` and :code:`partitioner_type`. -Currently, we provide four supported partitioner type to simulate the uniformity/non-uniformity in data quantity (uniform, linear, square, exponential). +In ``dataset.py``, we have a function ``instantiate_partitioner`` to instantiate the +data partitioner based on the given ``num_partitions`` and ``partitioner_type``. +Currently, we provide four supported partitioner type to simulate the +uniformity/non-uniformity in data quantity (uniform, linear, square, exponential). .. code-block:: python @@ -841,11 +869,10 @@ Currently, we provide four supported partitioner type to simulate the uniformity ) return partitioner - Customised centralised/distributed evaluation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To facilitate centralised evaluation, we define a function in :code:`server_utils.py`: +To facilitate centralised evaluation, we define a function in ``server_utils.py``: .. 
code-block:: python @@ -877,105 +904,112 @@ To facilitate centralised evaluation, we define a function in :code:`server_util return evaluate_fn -This function returns a evaluation function which instantiates a :code:`Booster` object and loads the global model weights to it. -The evaluation is conducted by calling :code:`eval_set()` method, and the tested AUC value is reported. +This function returns an evaluation function which instantiates a ``Booster`` object and +loads the global model weights to it. The evaluation is conducted by calling the +``eval_set()`` method, and the tested AUC value is reported. As for distributed evaluation on the clients, it's same as the quick-start example by -overriding the :code:`evaluate()` method insides the :code:`XgbClient` class in :code:`client_utils.py`. +overriding the ``evaluate()`` method inside the ``XgbClient`` class in +``client_utils.py``. Flower simulation ~~~~~~~~~~~~~~~~~~~~ -We also provide an example code (:code:`sim.py`) to use the simulation capabilities of Flower to simulate federated XGBoost training on either a single machine or a cluster of machines. - -..
code-block:: python +~~~~~~~~~~~~~~~~~ - from logging import INFO - import xgboost as xgb - from tqdm import tqdm - - import flwr as fl - from flwr_datasets import FederatedDataset - from flwr.common.logger import log - from flwr.server.strategy import FedXgbBagging, FedXgbCyclic - - from dataset import ( - instantiate_partitioner, - train_test_split, - transform_dataset_to_dmatrix, - separate_xy, - resplit, - ) - from utils import ( - sim_args_parser, - NUM_LOCAL_ROUND, - BST_PARAMS, - ) - from server_utils import ( - eval_config, - fit_config, - evaluate_metrics_aggregation, - get_evaluate_fn, - CyclicClientManager, - ) - from client_utils import XgbClient - -After importing all required packages, we define a :code:`main()` function to perform the simulation process: +We also provide an example code (``sim.py``) to use the simulation capabilities of +Flower to simulate federated XGBoost training on either a single machine or a cluster of +machines. .. code-block:: python - def main(): - # Parse arguments for experimental settings - args = sim_args_parser() + from logging import INFO + import xgboost as xgb + from tqdm import tqdm - # Load (HIGGS) dataset and conduct partitioning - partitioner = instantiate_partitioner( - partitioner_type=args.partitioner_type, num_partitions=args.pool_size + import flwr as fl + from flwr_datasets import FederatedDataset + from flwr.common.logger import log + from flwr.server.strategy import FedXgbBagging, FedXgbCyclic + + from dataset import ( + instantiate_partitioner, + train_test_split, + transform_dataset_to_dmatrix, + separate_xy, + resplit, + ) + from utils import ( + sim_args_parser, + NUM_LOCAL_ROUND, + BST_PARAMS, ) - fds = FederatedDataset( - dataset="jxie/higgs", - partitioners={"train": partitioner}, - resplitter=resplit, + from server_utils import ( + eval_config, + fit_config, + evaluate_metrics_aggregation, + get_evaluate_fn, + CyclicClientManager, ) + from client_utils import XgbClient - # Load centralised test 
set - if args.centralised_eval or args.centralised_eval_client: - log(INFO, "Loading centralised test set...") - test_data = fds.load_split("test") - test_data.set_format("numpy") - num_test = test_data.shape[0] - test_dmatrix = transform_dataset_to_dmatrix(test_data) - - # Load partitions and reformat data to DMatrix for xgboost - log(INFO, "Loading client local partitions...") - train_data_list = [] - valid_data_list = [] - - # Load and process all client partitions. This upfront cost is amortized soon - # after the simulation begins since clients wont need to preprocess their partition. - for node_id in tqdm(range(args.pool_size), desc="Extracting client partition"): - # Extract partition for client with node_id - partition = fds.load_partition(node_id=node_id, split="train") - partition.set_format("numpy") - - if args.centralised_eval_client: - # Use centralised test set for evaluation - train_data = partition - num_train = train_data.shape[0] - x_test, y_test = separate_xy(test_data) - valid_data_list.append(((x_test, y_test), num_test)) - else: - # Train/test splitting - train_data, valid_data, num_train, num_val = train_test_split( - partition, test_fraction=args.test_fraction, seed=args.seed - ) - x_valid, y_valid = separate_xy(valid_data) - valid_data_list.append(((x_valid, y_valid), num_val)) +After importing all required packages, we define a ``main()`` function to perform the +simulation process: - x_train, y_train = separate_xy(train_data) - train_data_list.append(((x_train, y_train), num_train)) +.. code-block:: python + + def main(): + # Parse arguments for experimental settings + args = sim_args_parser() -We first load the dataset and perform data partitioning, and the pre-processed data is stored in a :code:`list`. -After the simulation begins, the clients won't need to pre-process their partitions again. 
+ # Load (HIGGS) dataset and conduct partitioning + partitioner = instantiate_partitioner( + partitioner_type=args.partitioner_type, num_partitions=args.pool_size + ) + fds = FederatedDataset( + dataset="jxie/higgs", + partitioners={"train": partitioner}, + resplitter=resplit, + ) + + # Load centralised test set + if args.centralised_eval or args.centralised_eval_client: + log(INFO, "Loading centralised test set...") + test_data = fds.load_split("test") + test_data.set_format("numpy") + num_test = test_data.shape[0] + test_dmatrix = transform_dataset_to_dmatrix(test_data) + + # Load partitions and reformat data to DMatrix for xgboost + log(INFO, "Loading client local partitions...") + train_data_list = [] + valid_data_list = [] + + # Load and process all client partitions. This upfront cost is amortized soon + # after the simulation begins since clients wont need to preprocess their partition. + for node_id in tqdm(range(args.pool_size), desc="Extracting client partition"): + # Extract partition for client with node_id + partition = fds.load_partition(node_id=node_id, split="train") + partition.set_format("numpy") + + if args.centralised_eval_client: + # Use centralised test set for evaluation + train_data = partition + num_train = train_data.shape[0] + x_test, y_test = separate_xy(test_data) + valid_data_list.append(((x_test, y_test), num_test)) + else: + # Train/test splitting + train_data, valid_data, num_train, num_val = train_test_split( + partition, test_fraction=args.test_fraction, seed=args.seed + ) + x_valid, y_valid = separate_xy(valid_data) + valid_data_list.append(((x_valid, y_valid), num_val)) + + x_train, y_train = separate_xy(train_data) + train_data_list.append(((x_train, y_train), num_train)) + +We first load the dataset and perform data partitioning, and the pre-processed data is +stored in a ``list``. After the simulation begins, the clients won't need to pre-process +their partitions again. 
Then, we define the strategies and other hyper-parameters: @@ -985,21 +1019,21 @@ Then, we define the strategies and other hyper-parameters: if args.train_method == "bagging": # Bagging training strategy = FedXgbBagging( - evaluate_function=get_evaluate_fn(test_dmatrix) - if args.centralised_eval - else None, + evaluate_function=( + get_evaluate_fn(test_dmatrix) if args.centralised_eval else None + ), fraction_fit=(float(args.num_clients_per_round) / args.pool_size), min_fit_clients=args.num_clients_per_round, min_available_clients=args.pool_size, - min_evaluate_clients=args.num_evaluate_clients - if not args.centralised_eval - else 0, + min_evaluate_clients=( + args.num_evaluate_clients if not args.centralised_eval else 0 + ), fraction_evaluate=1.0 if not args.centralised_eval else 0.0, on_evaluate_config_fn=eval_config, on_fit_config_fn=fit_config, - evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation - if not args.centralised_eval - else None, + evaluate_metrics_aggregation_fn=( + evaluate_metrics_aggregation if not args.centralised_eval else None + ), ) else: # Cyclic training @@ -1028,7 +1062,7 @@ Then, we define the strategies and other hyper-parameters: new_lr = params["eta"] / args.pool_size params.update({"eta": new_lr}) -After that, we start the simulation by calling :code:`fl.simulation.start_simulation`: +After that, we start the simulation by calling ``fl.simulation.start_simulation``: .. code-block:: python @@ -1048,53 +1082,52 @@ After that, we start the simulation by calling :code:`fl.simulation.start_simula client_manager=CyclicClientManager() if args.train_method == "cyclic" else None, ) -One of key parameters for :code:`start_simulation` is :code:`client_fn` which returns a function to construct a client. -We define it as follows: +One of key parameters for ``start_simulation`` is ``client_fn`` which returns a function +to construct a client. We define it as follows: .. 
code-block:: python - def get_client_fn( - train_data_list, valid_data_list, train_method, params, num_local_round - ): - """Return a function to construct a client. - - The VirtualClientEngine will execute this function whenever a client is sampled by - the strategy to participate. - """ - - def client_fn(cid: str) -> fl.client.Client: - """Construct a FlowerClient with its own dataset partition.""" - x_train, y_train = train_data_list[int(cid)][0] - x_valid, y_valid = valid_data_list[int(cid)][0] - - # Reformat data to DMatrix - train_dmatrix = xgb.DMatrix(x_train, label=y_train) - valid_dmatrix = xgb.DMatrix(x_valid, label=y_valid) - - # Fetch the number of examples - num_train = train_data_list[int(cid)][1] - num_val = valid_data_list[int(cid)][1] - - # Create and return client - return XgbClient( - train_dmatrix, - valid_dmatrix, - num_train, - num_val, - num_local_round, - params, - train_method, - ) - - return client_fn - + def get_client_fn( + train_data_list, valid_data_list, train_method, params, num_local_round + ): + """Return a function to construct a client. + + The VirtualClientEngine will execute this function whenever a client is sampled by + the strategy to participate. 
+ """ + + def client_fn(cid: str) -> fl.client.Client: + """Construct a FlowerClient with its own dataset partition.""" + x_train, y_train = train_data_list[int(cid)][0] + x_valid, y_valid = valid_data_list[int(cid)][0] + + # Reformat data to DMatrix + train_dmatrix = xgb.DMatrix(x_train, label=y_train) + valid_dmatrix = xgb.DMatrix(x_valid, label=y_valid) + + # Fetch the number of examples + num_train = train_data_list[int(cid)][1] + num_val = valid_data_list[int(cid)][1] + + # Create and return client + return XgbClient( + train_dmatrix, + valid_dmatrix, + num_train, + num_val, + num_local_round, + params, + train_method, + ) + return client_fn Arguments parser -~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~ -In :code:`utils.py`, we define the arguments parsers for clients, server and simulation, allowing users to specify different experimental settings. -Let's first see the sever side: +In ``utils.py``, we define the arguments parsers for clients, server and simulation, +allowing users to specify different experimental settings. Let's first see the sever +side: .. code-block:: python @@ -1102,190 +1135,192 @@ Let's first see the sever side: def server_args_parser(): - """Parse arguments to define experimental settings on server side.""" - parser = argparse.ArgumentParser() - - parser.add_argument( - "--train-method", - default="bagging", - type=str, - choices=["bagging", "cyclic"], - help="Training methods selected from bagging aggregation or cyclic training.", - ) - parser.add_argument( - "--pool-size", default=2, type=int, help="Number of total clients." - ) - parser.add_argument( - "--num-rounds", default=5, type=int, help="Number of FL rounds." 
- ) - parser.add_argument( - "--num-clients-per-round", - default=2, - type=int, - help="Number of clients participate in training each round.", - ) - parser.add_argument( - "--num-evaluate-clients", - default=2, - type=int, - help="Number of clients selected for evaluation.", - ) - parser.add_argument( - "--centralised-eval", - action="store_true", - help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", - ) - - args = parser.parse_args() - return args - -This allows user to specify training strategies / the number of total clients / FL rounds / participating clients / clients for evaluation, -and evaluation fashion. Note that with :code:`--centralised-eval`, the sever will do centralised evaluation -and all functionalities for client evaluation will be disabled. + """Parse arguments to define experimental settings on server side.""" + parser = argparse.ArgumentParser() + + parser.add_argument( + "--train-method", + default="bagging", + type=str, + choices=["bagging", "cyclic"], + help="Training methods selected from bagging aggregation or cyclic training.", + ) + parser.add_argument( + "--pool-size", default=2, type=int, help="Number of total clients." + ) + parser.add_argument( + "--num-rounds", default=5, type=int, help="Number of FL rounds." + ) + parser.add_argument( + "--num-clients-per-round", + default=2, + type=int, + help="Number of clients participate in training each round.", + ) + parser.add_argument( + "--num-evaluate-clients", + default=2, + type=int, + help="Number of clients selected for evaluation.", + ) + parser.add_argument( + "--centralised-eval", + action="store_true", + help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", + ) + + args = parser.parse_args() + return args + +This allows user to specify training strategies / the number of total clients / FL +rounds / participating clients / clients for evaluation, and evaluation fashion. 
Note +that with ``--centralised-eval``, the sever will do centralised evaluation and all +functionalities for client evaluation will be disabled. Then, the argument parser on client side: .. code-block:: python def client_args_parser(): - """Parse arguments to define experimental settings on client side.""" - parser = argparse.ArgumentParser() - - parser.add_argument( - "--train-method", - default="bagging", - type=str, - choices=["bagging", "cyclic"], - help="Training methods selected from bagging aggregation or cyclic training.", - ) - parser.add_argument( - "--num-partitions", default=10, type=int, help="Number of partitions." - ) - parser.add_argument( - "--partitioner-type", - default="uniform", - type=str, - choices=["uniform", "linear", "square", "exponential"], - help="Partitioner types.", - ) - parser.add_argument( - "--node-id", - default=0, - type=int, - help="Node ID used for the current client.", - ) - parser.add_argument( - "--seed", default=42, type=int, help="Seed used for train/test splitting." - ) - parser.add_argument( - "--test-fraction", - default=0.2, - type=float, - help="Test fraction for train/test splitting.", - ) - parser.add_argument( - "--centralised-eval", - action="store_true", - help="Conduct evaluation on centralised test set (True), or on hold-out data (False).", - ) - parser.add_argument( - "--scaled-lr", - action="store_true", - help="Perform scaled learning rate based on the number of clients (True).", - ) - - args = parser.parse_args() - return args - -This defines various options for client data partitioning. -Besides, clients also have an option to conduct evaluation on centralised test set by setting :code:`--centralised-eval`, -as well as an option to perform scaled learning rate based on the number of clients by setting :code:`--scaled-lr`. 
+ """Parse arguments to define experimental settings on client side.""" + parser = argparse.ArgumentParser() + + parser.add_argument( + "--train-method", + default="bagging", + type=str, + choices=["bagging", "cyclic"], + help="Training methods selected from bagging aggregation or cyclic training.", + ) + parser.add_argument( + "--num-partitions", default=10, type=int, help="Number of partitions." + ) + parser.add_argument( + "--partitioner-type", + default="uniform", + type=str, + choices=["uniform", "linear", "square", "exponential"], + help="Partitioner types.", + ) + parser.add_argument( + "--node-id", + default=0, + type=int, + help="Node ID used for the current client.", + ) + parser.add_argument( + "--seed", default=42, type=int, help="Seed used for train/test splitting." + ) + parser.add_argument( + "--test-fraction", + default=0.2, + type=float, + help="Test fraction for train/test splitting.", + ) + parser.add_argument( + "--centralised-eval", + action="store_true", + help="Conduct evaluation on centralised test set (True), or on hold-out data (False).", + ) + parser.add_argument( + "--scaled-lr", + action="store_true", + help="Perform scaled learning rate based on the number of clients (True).", + ) + + args = parser.parse_args() + return args + +This defines various options for client data partitioning. Besides, clients also have an +option to conduct evaluation on centralised test set by setting ``--centralised-eval``, +as well as an option to perform scaled learning rate based on the number of clients by +setting ``--scaled-lr``. We also have an argument parser for simulation: .. 
code-block:: python - def sim_args_parser(): - """Parse arguments to define experimental settings on server side.""" - parser = argparse.ArgumentParser() - - parser.add_argument( - "--train-method", - default="bagging", - type=str, - choices=["bagging", "cyclic"], - help="Training methods selected from bagging aggregation or cyclic training.", - ) - - # Server side - parser.add_argument( - "--pool-size", default=5, type=int, help="Number of total clients." - ) - parser.add_argument( - "--num-rounds", default=30, type=int, help="Number of FL rounds." - ) - parser.add_argument( - "--num-clients-per-round", - default=5, - type=int, - help="Number of clients participate in training each round.", - ) - parser.add_argument( - "--num-evaluate-clients", - default=5, - type=int, - help="Number of clients selected for evaluation.", - ) - parser.add_argument( - "--centralised-eval", - action="store_true", - help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", - ) - parser.add_argument( - "--num-cpus-per-client", - default=2, - type=int, - help="Number of CPUs used for per client.", - ) - - # Client side - parser.add_argument( - "--partitioner-type", - default="uniform", - type=str, - choices=["uniform", "linear", "square", "exponential"], - help="Partitioner types.", - ) - parser.add_argument( - "--seed", default=42, type=int, help="Seed used for train/test splitting." 
- ) - parser.add_argument( - "--test-fraction", - default=0.2, - type=float, - help="Test fraction for train/test splitting.", - ) - parser.add_argument( - "--centralised-eval-client", - action="store_true", - help="Conduct evaluation on centralised test set (True), or on hold-out data (False).", - ) - parser.add_argument( - "--scaled-lr", - action="store_true", - help="Perform scaled learning rate based on the number of clients (True).", - ) - - args = parser.parse_args() - return args + def sim_args_parser(): + """Parse arguments to define experimental settings on server side.""" + parser = argparse.ArgumentParser() + + parser.add_argument( + "--train-method", + default="bagging", + type=str, + choices=["bagging", "cyclic"], + help="Training methods selected from bagging aggregation or cyclic training.", + ) + + # Server side + parser.add_argument( + "--pool-size", default=5, type=int, help="Number of total clients." + ) + parser.add_argument( + "--num-rounds", default=30, type=int, help="Number of FL rounds." + ) + parser.add_argument( + "--num-clients-per-round", + default=5, + type=int, + help="Number of clients participate in training each round.", + ) + parser.add_argument( + "--num-evaluate-clients", + default=5, + type=int, + help="Number of clients selected for evaluation.", + ) + parser.add_argument( + "--centralised-eval", + action="store_true", + help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", + ) + parser.add_argument( + "--num-cpus-per-client", + default=2, + type=int, + help="Number of CPUs used for per client.", + ) + + # Client side + parser.add_argument( + "--partitioner-type", + default="uniform", + type=str, + choices=["uniform", "linear", "square", "exponential"], + help="Partitioner types.", + ) + parser.add_argument( + "--seed", default=42, type=int, help="Seed used for train/test splitting." 
+ ) + parser.add_argument( + "--test-fraction", + default=0.2, + type=float, + help="Test fraction for train/test splitting.", + ) + parser.add_argument( + "--centralised-eval-client", + action="store_true", + help="Conduct evaluation on centralised test set (True), or on hold-out data (False).", + ) + parser.add_argument( + "--scaled-lr", + action="store_true", + help="Perform scaled learning rate based on the number of clients (True).", + ) + + args = parser.parse_args() + return args This integrates all arguments for both client and server sides. Example commands -~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~ -To run a centralised evaluated experiment with bagging strategy on 5 clients with exponential distribution for 50 rounds, -we first start the server as below: +To run a centralised evaluated experiment with bagging strategy on 5 clients with +exponential distribution for 50 rounds, we first start the server as below: .. code-block:: shell @@ -1303,4 +1338,6 @@ To run the same experiment with Flower simulation: $ python3 sim.py --train-method=bagging --pool-size=5 --num-rounds=50 --num-clients-per-round=5 --partitioner-type=exponential --centralised-eval -The full `code `_ for this comprehensive example can be found in :code:`examples/xgboost-comprehensive`. +The full `code +`_ for this +comprehensive example can be found in ``examples/xgboost-comprehensive``. 
diff --git a/e2e/e2e-bare-auth/certificate.conf b/e2e/e2e-bare-auth/certificate.conf index ea97fcbb700d..04a2ed388174 100644 --- a/e2e/e2e-bare-auth/certificate.conf +++ b/e2e/e2e-bare-auth/certificate.conf @@ -18,3 +18,4 @@ subjectAltName = @alt_names DNS.1 = localhost IP.1 = ::1 IP.2 = 127.0.0.1 +IP.3 = 0.0.0.0 diff --git a/e2e/test_superexec.sh b/e2e/test_superexec.sh new file mode 100755 index 000000000000..ae79128c6ac1 --- /dev/null +++ b/e2e/test_superexec.sh @@ -0,0 +1,122 @@ +#!/bin/bash +set -e + +# Set connectivity parameters +case "$1" in + secure) + ./generate.sh + server_arg='--ssl-ca-certfile ../certificates/ca.crt + --ssl-certfile ../certificates/server.pem + --ssl-keyfile ../certificates/server.key' + client_arg='--root-certificates ../certificates/ca.crt' + # For $superexec_arg, note special ordering of single- and double-quotes + superexec_arg='--executor-config 'root-certificates=\"../certificates/ca.crt\"'' + superexec_arg="$server_arg $superexec_arg" + ;; + insecure) + server_arg='--insecure' + client_arg=$server_arg + superexec_arg=$server_arg + ;; +esac + +# Set authentication parameters +case "$2" in + client-auth) + server_auth='--auth-list-public-keys ../keys/client_public_keys.csv + --auth-superlink-private-key ../keys/server_credentials + --auth-superlink-public-key ../keys/server_credentials.pub' + client_auth_1='--auth-supernode-private-key ../keys/client_credentials_1 + --auth-supernode-public-key ../keys/client_credentials_1.pub' + client_auth_2='--auth-supernode-private-key ../keys/client_credentials_2 + --auth-supernode-public-key ../keys/client_credentials_2.pub' + server_address='127.0.0.1:9092' + ;; + *) + server_auth='' + client_auth_1='' + client_auth_2='' + server_address='127.0.0.1:9092' + ;; +esac + +# Set engine +case "$3" in + deployment-engine) + superexec_engine_arg='--executor flwr.superexec.deployment:executor' + ;; + simulation-engine) + superexec_engine_arg='--executor flwr.superexec.simulation:executor + 
--executor-config 'num-supernodes=10'' + ;; +esac + + +# Create and install Flower app +flwr new e2e-tmp-test --framework numpy --username flwrlabs +cd e2e-tmp-test +# Remove flwr dependency from `pyproject.toml`. Seems necessary so that it does +# not override the wheel dependency +if [[ "$OSTYPE" == "darwin"* ]]; then + # macOS (Darwin) system + sed -i '' '/flwr\[simulation\]/d' pyproject.toml +else + # Non-macOS system (Linux) + sed -i '/flwr\[simulation\]/d' pyproject.toml +fi +pip install -e . --no-deps + +# Check if the first argument is 'insecure' +if [ "$1" == "insecure" ]; then + # If $1 is 'insecure', append the first line + echo -e $"\n[tool.flwr.federations.superexec]\naddress = \"127.0.0.1:9093\"\ninsecure = true" >> pyproject.toml +else + # Otherwise, append the second line + echo -e $"\n[tool.flwr.federations.superexec]\naddress = \"127.0.0.1:9093\"\nroot-certificates = \"../certificates/ca.crt\"" >> pyproject.toml +fi + +timeout 2m flower-superlink $server_arg $server_auth & +sl_pid=$! +sleep 2 + +timeout 2m flower-supernode ./ $client_arg \ + --superlink $server_address $client_auth_1 \ + --node-config "partition-id=0 num-partitions=2" --max-retries 0 & +cl1_pid=$! +sleep 2 + +timeout 2m flower-supernode ./ $client_arg \ + --superlink $server_address $client_auth_2 \ + --node-config "partition-id=1 num-partitions=2" --max-retries 0 & +cl2_pid=$! +sleep 2 + +timeout 2m flower-superexec $superexec_arg $superexec_engine_arg 2>&1 | tee flwr_output.log & +se_pid=$(pgrep -f "flower-superexec") +sleep 2 + +timeout 1m flwr run --run-config num-server-rounds=1 ../e2e-tmp-test superexec + +# Initialize a flag to track if training is successful +found_success=false +timeout=120 # Timeout after 120 seconds +elapsed=0 + +# Check for "Success" in a loop with a timeout +while [ "$found_success" = false ] && [ $elapsed -lt $timeout ]; do + if grep -q "Run finished" flwr_output.log; then + echo "Training worked correctly!" 
+ found_success=true + kill $cl1_pid; kill $cl2_pid; sleep 1; kill $sl_pid; kill $se_pid; + else + echo "Waiting for training ... ($elapsed seconds elapsed)" + fi + # Sleep for a short period and increment the elapsed time + sleep 2 + elapsed=$((elapsed + 2)) +done + +if [ "$found_success" = false ]; then + echo "Training had an issue and timed out." + kill $cl1_pid; kill $cl2_pid; kill $sl_pid; kill $se_pid; +fi diff --git a/pyproject.toml b/pyproject.toml index 536d0ddd20c4..81c1369f6552 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -109,25 +109,24 @@ mypy-protobuf = "==3.2.0" jupyterlab = "==4.0.12" rope = "==1.11.0" semver = "==3.0.2" -sphinx = "==6.2.1" +sphinx = "==7.4.7" sphinx-intl = "==2.2.0" -sphinx-click = "==5.1.0" -myst-parser = "==1.0.0" -sphinx-design = "==0.5.0" +sphinx-click = "==6.0.0" +myst-parser = "==3.0.1" +sphinx-design = "==0.6.1" sphinx-copybutton = "==0.5.2" sphinxcontrib-mermaid = "==0.9.2" sphinxcontrib-youtube = "==1.4.1" -furo = "==2023.9.10" -sphinx-reredirects = "==0.1.3" -nbsphinx = "==0.9.4" +furo = "==2024.8.6" +sphinx-reredirects = "==0.1.5" +nbsphinx = "==0.9.5" nbstripout = "==0.6.1" ruff = "==0.1.9" sphinx-argparse = "==0.4.0" pipreqs = "==0.4.13" -mdformat-gfm = "==0.3.5" +mdformat-gfm = "==0.3.6" mdformat-frontmatter = "==2.0.1" mdformat-beautysh = "==0.1.1" -mdformat-myst = "==0.1.5" twine = "==5.1.1" pyroma = "==4.2" check-wheel-contents = "==0.4.0" @@ -137,6 +136,15 @@ licensecheck = "==2024" pre-commit = "==3.5.0" sphinx-substitution-extensions = "2022.02.16" sphinxext-opengraph = "==0.9.1" +docstrfmt = { git = "https://github.com/charlesbvll/docstrfmt.git", branch = "patch-1" } + +[tool.docstrfmt] +extend_exclude = [ + "doc/source/conf.py", + "doc/source/tutorial-quickstart-huggingface.rst", + "doc/source/_templates/autosummary/*", + "doc/source/ref-api/*", +] [tool.isort] profile = "black" diff --git a/src/docker/base/ubuntu/Dockerfile b/src/docker/base/ubuntu/Dockerfile index 47655b1a52a1..b52599a80784 100644 
--- a/src/docker/base/ubuntu/Dockerfile +++ b/src/docker/base/ubuntu/Dockerfile @@ -15,7 +15,7 @@ # hadolint global ignore=DL3008 ARG DISTRO=ubuntu -ARG DISTRO_VERSION=22.04 +ARG DISTRO_VERSION=24.04 FROM $DISTRO:$DISTRO_VERSION AS python ENV DEBIAN_FRONTEND=noninteractive @@ -87,11 +87,10 @@ RUN apt-get update \ ca-certificates \ && rm -rf /var/lib/apt/lists/* \ # add non-root user - && adduser \ + && useradd \ --no-create-home \ - --home /app \ - --disabled-password \ - --gecos "" \ + --home-dir /app \ + -c "" \ --uid 49999 app \ && mkdir -p /app \ && chown -R app:app /app diff --git a/src/proto/flwr/proto/fab.proto b/src/proto/flwr/proto/fab.proto index 6f8e6b87808d..367b6e5b5c13 100644 --- a/src/proto/flwr/proto/fab.proto +++ b/src/proto/flwr/proto/fab.proto @@ -17,6 +17,8 @@ syntax = "proto3"; package flwr.proto; +import "flwr/proto/node.proto"; + message Fab { // This field is the hash of the data field. It is used to identify the data. // The hash is calculated using the SHA-256 algorithm and is represented as a @@ -26,5 +28,8 @@ message Fab { bytes content = 2; } -message GetFabRequest { string hash_str = 1; } +message GetFabRequest { + Node node = 1; + string hash_str = 2; +} message GetFabResponse { Fab fab = 1; } diff --git a/src/proto/flwr/proto/fleet.proto b/src/proto/flwr/proto/fleet.proto index b87214ac52f3..130b30b96669 100644 --- a/src/proto/flwr/proto/fleet.proto +++ b/src/proto/flwr/proto/fleet.proto @@ -69,7 +69,10 @@ message PullTaskInsResponse { } // PushTaskRes messages -message PushTaskResRequest { repeated TaskRes task_res_list = 1; } +message PushTaskResRequest { + Node node = 1; + repeated TaskRes task_res_list = 2; +} message PushTaskResResponse { Reconnect reconnect = 1; map results = 2; diff --git a/src/proto/flwr/proto/recordset.proto b/src/proto/flwr/proto/recordset.proto index d51d0f9ce416..939e97cf46e3 100644 --- a/src/proto/flwr/proto/recordset.proto +++ b/src/proto/flwr/proto/recordset.proto @@ -18,7 +18,8 @@ syntax = "proto3"; 
package flwr.proto; message DoubleList { repeated double vals = 1; } -message Sint64List { repeated sint64 vals = 1; } +message SintList { repeated sint64 vals = 1; } +message UintList { repeated uint64 vals = 1; } message BoolList { repeated bool vals = 1; } message StringList { repeated string vals = 1; } message BytesList { repeated bytes vals = 1; } @@ -35,10 +36,12 @@ message MetricsRecordValue { // Single element double double = 1; sint64 sint64 = 2; + uint64 uint64 = 3; // List types DoubleList double_list = 21; - Sint64List sint64_list = 22; + SintList sint_list = 22; + UintList uint_list = 23; } } @@ -47,16 +50,18 @@ message ConfigsRecordValue { // Single element double double = 1; sint64 sint64 = 2; - bool bool = 3; - string string = 4; - bytes bytes = 5; + uint64 uint64 = 3; + bool bool = 4; + string string = 5; + bytes bytes = 6; // List types DoubleList double_list = 21; - Sint64List sint64_list = 22; - BoolList bool_list = 23; - StringList string_list = 24; - BytesList bytes_list = 25; + SintList sint_list = 22; + UintList uint_list = 23; + BoolList bool_list = 24; + StringList string_list = 25; + BytesList bytes_list = 26; } } diff --git a/src/proto/flwr/proto/run.proto b/src/proto/flwr/proto/run.proto index 2c9bd877f66c..4312e1127cc2 100644 --- a/src/proto/flwr/proto/run.proto +++ b/src/proto/flwr/proto/run.proto @@ -18,6 +18,7 @@ syntax = "proto3"; package flwr.proto; import "flwr/proto/fab.proto"; +import "flwr/proto/node.proto"; import "flwr/proto/transport.proto"; message Run { @@ -47,7 +48,10 @@ message CreateRunRequest { message CreateRunResponse { uint64 run_id = 1; } // GetRun -message GetRunRequest { uint64 run_id = 1; } +message GetRunRequest { + Node node = 1; + uint64 run_id = 2; +} message GetRunResponse { Run run = 1; } // UpdateRunStatus @@ -58,5 +62,8 @@ message UpdateRunStatusRequest { message UpdateRunStatusResponse {} // GetRunStatus -message GetRunStatusRequest { repeated uint64 run_ids = 1; } +message GetRunStatusRequest { + 
Node node = 1; + repeated uint64 run_ids = 2; +} message GetRunStatusResponse { map run_status_dict = 1; } diff --git a/src/proto/flwr/proto/transport.proto b/src/proto/flwr/proto/transport.proto index 17a285ebe44b..6a4f45aa3c97 100644 --- a/src/proto/flwr/proto/transport.proto +++ b/src/proto/flwr/proto/transport.proto @@ -107,7 +107,7 @@ message Scalar { // int32 int32 = 3; // int64 int64 = 4; // uint32 uint32 = 5; - // uint64 uint64 = 6; + uint64 uint64 = 6; // sint32 sint32 = 7; sint64 sint64 = 8; // fixed32 fixed32 = 9; diff --git a/src/py/flwr/cli/log.py b/src/py/flwr/cli/log.py index 6915de1e00c5..7199cefce4f7 100644 --- a/src/py/flwr/cli/log.py +++ b/src/py/flwr/cli/log.py @@ -26,18 +26,71 @@ from flwr.cli.config_utils import load_and_validate from flwr.common.grpc import GRPC_MAX_MESSAGE_LENGTH, create_channel from flwr.common.logger import log as logger +from flwr.proto.exec_pb2 import StreamLogsRequest # pylint: disable=E0611 +from flwr.proto.exec_pb2_grpc import ExecStub CONN_REFRESH_PERIOD = 60 # Connection refresh period for log streaming (seconds) -# pylint: disable=unused-argument -def stream_logs(run_id: int, channel: grpc.Channel, period: int) -> None: +def start_stream( + run_id: int, channel: grpc.Channel, refresh_period: int = CONN_REFRESH_PERIOD +) -> None: + """Start log streaming for a given run ID.""" + try: + while True: + logger(INFO, "Starting logstream for run_id `%s`", run_id) + stream_logs(run_id, channel, refresh_period) + time.sleep(2) + logger(DEBUG, "Reconnecting to logstream") + except KeyboardInterrupt: + logger(INFO, "Exiting logstream") + except grpc.RpcError as e: + # pylint: disable=E1101 + if e.code() == grpc.StatusCode.NOT_FOUND: + logger(ERROR, "Invalid run_id `%s`, exiting", run_id) + if e.code() == grpc.StatusCode.CANCELLED: + pass + finally: + channel.close() + + +def stream_logs(run_id: int, channel: grpc.Channel, duration: int) -> None: """Stream logs from the beginning of a run with connection refresh.""" + 
start_time = time.time() + stub = ExecStub(channel) + req = StreamLogsRequest(run_id=run_id) + + for res in stub.StreamLogs(req): + print(res.log_output) + if time.time() - start_time > duration: + break -# pylint: disable=unused-argument def print_logs(run_id: int, channel: grpc.Channel, timeout: int) -> None: """Print logs from the beginning of a run.""" + stub = ExecStub(channel) + req = StreamLogsRequest(run_id=run_id) + + try: + while True: + try: + # Enforce timeout for graceful exit + for res in stub.StreamLogs(req, timeout=timeout): + print(res.log_output) + except grpc.RpcError as e: + # pylint: disable=E1101 + if e.code() == grpc.StatusCode.DEADLINE_EXCEEDED: + break + if e.code() == grpc.StatusCode.NOT_FOUND: + logger(ERROR, "Invalid run_id `%s`, exiting", run_id) + break + if e.code() == grpc.StatusCode.CANCELLED: + break + except KeyboardInterrupt: + logger(DEBUG, "Stream interrupted by user") + finally: + channel.close() + logger(DEBUG, "Channel closed") def on_channel_state_change(channel_connectivity: str) -> None: @@ -175,22 +228,7 @@ def _log_with_superexec( channel.subscribe(on_channel_state_change) if stream: - try: - while True: - logger(INFO, "Starting logstream for run_id `%s`", run_id) - stream_logs(run_id, channel, CONN_REFRESH_PERIOD) - time.sleep(2) - logger(DEBUG, "Reconnecting to logstream") - except KeyboardInterrupt: - logger(INFO, "Exiting logstream") - except grpc.RpcError as e: - # pylint: disable=E1101 - if e.code() == grpc.StatusCode.NOT_FOUND: - logger(ERROR, "Invalid run_id `%s`, exiting", run_id) - if e.code() == grpc.StatusCode.CANCELLED: - pass - finally: - channel.close() + start_stream(run_id, channel, CONN_REFRESH_PERIOD) else: logger(INFO, "Printing logstream for run_id `%s`", run_id) print_logs(run_id, channel, timeout=5) diff --git a/src/py/flwr/cli/log_test.py b/src/py/flwr/cli/log_test.py new file mode 100644 index 000000000000..932610bea2f3 --- /dev/null +++ b/src/py/flwr/cli/log_test.py @@ -0,0 +1,78 @@ +# 
Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Test for Flower command line interface `log` command.""" + + +import unittest +from typing import NoReturn +from unittest.mock import Mock, call, patch + +from flwr.proto.exec_pb2 import StreamLogsResponse # pylint: disable=E0611 + +from .log import print_logs, stream_logs + + +class InterruptedStreamLogsResponse: + """Create a StreamLogsResponse object with KeyboardInterrupt.""" + + @property + def log_output(self) -> NoReturn: + """Raise KeyboardInterrupt to exit logstream test gracefully.""" + raise KeyboardInterrupt + + +class TestFlwrLog(unittest.TestCase): + """Unit tests for `flwr log` CLI functions.""" + + def setUp(self) -> None: + """Initialize mock ExecStub before each test.""" + self.expected_calls = [ + call("log_output_1"), + call("log_output_2"), + call("log_output_3"), + ] + mock_response_iterator = [ + iter( + [StreamLogsResponse(log_output=f"log_output_{i}") for i in range(1, 4)] + + [InterruptedStreamLogsResponse()] + ) + ] + self.mock_stub = Mock() + self.mock_stub.StreamLogs.side_effect = mock_response_iterator + self.patcher = patch("flwr.cli.log.ExecStub", return_value=self.mock_stub) + + self.patcher.start() + + # Create mock channel + self.mock_channel = Mock() + + def tearDown(self) -> None: + """Cleanup.""" + self.patcher.stop() + + def 
test_flwr_log_stream_method(self) -> None: + """Test stream_logs.""" + with patch("builtins.print") as mock_print: + with self.assertRaises(KeyboardInterrupt): + stream_logs(run_id=123, channel=self.mock_channel, duration=1) + # Assert that mock print was called with the expected arguments + mock_print.assert_has_calls(self.expected_calls) + + def test_flwr_log_print_method(self) -> None: + """Test print_logs.""" + with patch("builtins.print") as mock_print: + print_logs(run_id=123, channel=self.mock_channel, timeout=0) + # Assert that mock print was called with the expected arguments + mock_print.assert_has_calls(self.expected_calls) diff --git a/src/py/flwr/cli/new/templates/app/README.flowertune.md.tpl b/src/py/flwr/cli/new/templates/app/README.flowertune.md.tpl index 4bdc9c779a29..2703f0a86a3e 100644 --- a/src/py/flwr/cli/new/templates/app/README.flowertune.md.tpl +++ b/src/py/flwr/cli/new/templates/app/README.flowertune.md.tpl @@ -55,7 +55,7 @@ We use Mistral-7B model with 4-bit quantization as default. The estimated VRAM c | :--------: | :--------: | :--------: | :--------: | :--------: | | VRAM | ~25.50 GB | ~17.30 GB | ~22.80 GB | ~17.40 GB | -You can adjust the CPU/GPU resources you assign to each of the clients based on your device, which are specified with `options.backend.clientapp-cpus` and `options.backend.clientapp-gpus` under `[tool.flwr.federations.local-simulation]` entry in `pyproject.toml`. +You can adjust the CPU/GPU resources you assign to each of the clients based on your device, which are specified with `options.backend.client-resources.num-cpus` and `options.backend.client-resources.num-gpus` under `[tool.flwr.federations.local-simulation]` entry in `pyproject.toml`. 
## Model saving diff --git a/src/py/flwr/cli/run/run.py b/src/py/flwr/cli/run/run.py index 905055ac70c0..2832af3aebab 100644 --- a/src/py/flwr/cli/run/run.py +++ b/src/py/flwr/cli/run/run.py @@ -34,6 +34,10 @@ from flwr.proto.exec_pb2 import StartRunRequest # pylint: disable=E0611 from flwr.proto.exec_pb2_grpc import ExecStub +from ..log import start_stream + +CONN_REFRESH_PERIOD = 60 # Connection refresh period for log streaming (seconds) + def on_channel_state_change(channel_connectivity: str) -> None: """Log channel connectivity.""" @@ -62,6 +66,14 @@ def run( "inside the `pyproject.toml` in order to be properly overriden.", ), ] = None, + stream: Annotated[ + bool, + typer.Option( + "--stream", + help="Use `--stream` with `flwr run` to display logs;\n " + "logs are not streamed by default.", + ), + ] = False, ) -> None: """Run Flower App.""" typer.secho("Loading project configuration... ", fg=typer.colors.BLUE) @@ -117,7 +129,7 @@ def run( raise typer.Exit(code=1) if "address" in federation_config: - _run_with_superexec(app, federation_config, config_overrides) + _run_with_superexec(app, federation_config, config_overrides, stream) else: _run_without_superexec(app, federation_config, config_overrides, federation) @@ -126,6 +138,7 @@ def _run_with_superexec( app: Path, federation_config: dict[str, Any], config_overrides: Optional[list[str]], + stream: bool, ) -> None: insecure_str = federation_config.get("insecure") @@ -183,6 +196,9 @@ def _run_with_superexec( fab_path.unlink() typer.secho(f"🎊 Successfully started run {res.run_id}", fg=typer.colors.GREEN) + if stream: + start_stream(res.run_id, channel, CONN_REFRESH_PERIOD) + def _run_without_superexec( app: Optional[Path], diff --git a/src/py/flwr/client/grpc_rere_client/client_interceptor.py b/src/py/flwr/client/grpc_rere_client/client_interceptor.py index 653e384aff96..041860957db7 100644 --- a/src/py/flwr/client/grpc_rere_client/client_interceptor.py +++ 
b/src/py/flwr/client/grpc_rere_client/client_interceptor.py @@ -31,6 +31,7 @@ generate_shared_key, public_key_to_bytes, ) +from flwr.proto.fab_pb2 import GetFabRequest # pylint: disable=E0611 from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, DeleteNodeRequest, @@ -50,6 +51,7 @@ PushTaskResRequest, GetRunRequest, PingRequest, + GetFabRequest, ] @@ -126,6 +128,7 @@ def intercept_unary_unary( PushTaskResRequest, GetRunRequest, PingRequest, + GetFabRequest, ), ): if self.shared_secret is None: diff --git a/src/py/flwr/client/grpc_rere_client/client_interceptor_test.py b/src/py/flwr/client/grpc_rere_client/client_interceptor_test.py index 27f759a71713..a029b926423f 100644 --- a/src/py/flwr/client/grpc_rere_client/client_interceptor_test.py +++ b/src/py/flwr/client/grpc_rere_client/client_interceptor_test.py @@ -16,12 +16,13 @@ import base64 +import inspect import threading import unittest from collections.abc import Sequence from concurrent import futures from logging import DEBUG, INFO, WARN -from typing import Optional, Union +from typing import Optional, Union, get_args import grpc @@ -47,6 +48,7 @@ PushTaskResRequest, PushTaskResResponse, ) +from flwr.proto.fleet_pb2_grpc import FleetServicer from flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 from flwr.proto.task_pb2 import Task, TaskIns # pylint: disable=E0611 @@ -437,6 +439,20 @@ def test_without_servicer(self) -> None: assert self._servicer.received_client_metadata() is None + def test_fleet_requests_included(self) -> None: + """Test if all Fleet requests are included in the authentication mode.""" + # Prepare + requests = get_args(Request) + rpc_names = {req.__qualname__.removesuffix("Request") for req in requests} + expected_rpc_names = { + name + for name, ref in inspect.getmembers(FleetServicer) + if inspect.isfunction(ref) + } + + # Assert + assert expected_rpc_names == rpc_names + if 
__name__ == "__main__": unittest.main(verbosity=2) diff --git a/src/py/flwr/client/grpc_rere_client/connection.py b/src/py/flwr/client/grpc_rere_client/connection.py index 7ce3d37b7a17..b4fa28373600 100644 --- a/src/py/flwr/client/grpc_rere_client/connection.py +++ b/src/py/flwr/client/grpc_rere_client/connection.py @@ -269,7 +269,7 @@ def send(message: Message) -> None: task_res = message_to_taskres(message) # Serialize ProtoBuf to bytes - request = PushTaskResRequest(task_res_list=[task_res]) + request = PushTaskResRequest(node=node, task_res_list=[task_res]) _ = retry_invoker.invoke(stub.PushTaskRes, request) # Cleanup @@ -277,7 +277,7 @@ def send(message: Message) -> None: def get_run(run_id: int) -> Run: # Call FleetAPI - get_run_request = GetRunRequest(run_id=run_id) + get_run_request = GetRunRequest(node=node, run_id=run_id) get_run_response: GetRunResponse = retry_invoker.invoke( stub.GetRun, request=get_run_request, @@ -294,7 +294,7 @@ def get_run(run_id: int) -> Run: def get_fab(fab_hash: str) -> Fab: # Call FleetAPI - get_fab_request = GetFabRequest(hash_str=fab_hash) + get_fab_request = GetFabRequest(node=node, hash_str=fab_hash) get_fab_response: GetFabResponse = retry_invoker.invoke( stub.GetFab, request=get_fab_request, diff --git a/src/py/flwr/client/rest_client/connection.py b/src/py/flwr/client/rest_client/connection.py index 72b6be25a708..485bbd7a1810 100644 --- a/src/py/flwr/client/rest_client/connection.py +++ b/src/py/flwr/client/rest_client/connection.py @@ -340,7 +340,7 @@ def send(message: Message) -> None: task_res = message_to_taskres(message) # Serialize ProtoBuf to bytes - req = PushTaskResRequest(task_res_list=[task_res]) + req = PushTaskResRequest(node=node, task_res_list=[task_res]) # Send the request res = _request(req, PushTaskResResponse, PATH_PUSH_TASK_RES) @@ -356,7 +356,7 @@ def send(message: Message) -> None: def get_run(run_id: int) -> Run: # Construct the request - req = GetRunRequest(run_id=run_id) + req = 
GetRunRequest(node=node, run_id=run_id) # Send the request res = _request(req, GetRunResponse, PATH_GET_RUN) @@ -373,7 +373,7 @@ def get_run(run_id: int) -> Run: def get_fab(fab_hash: str) -> Fab: # Construct the request - req = GetFabRequest(hash_str=fab_hash) + req = GetFabRequest(node=node, hash_str=fab_hash) # Send the request res = _request(req, GetFabResponse, PATH_GET_FAB) diff --git a/src/py/flwr/common/secure_aggregation/secaggplus_utils.py b/src/py/flwr/common/secure_aggregation/secaggplus_utils.py index 7bfb80f57891..919894d5388f 100644 --- a/src/py/flwr/common/secure_aggregation/secaggplus_utils.py +++ b/src/py/flwr/common/secure_aggregation/secaggplus_utils.py @@ -43,8 +43,8 @@ def share_keys_plaintext_concat( """ return b"".join( [ - int.to_bytes(src_node_id, 8, "little", signed=True), - int.to_bytes(dst_node_id, 8, "little", signed=True), + int.to_bytes(src_node_id, 8, "little", signed=False), + int.to_bytes(dst_node_id, 8, "little", signed=False), int.to_bytes(len(b_share), 4, "little"), b_share, sk_share, @@ -72,8 +72,8 @@ def share_keys_plaintext_separate(plaintext: bytes) -> tuple[int, int, bytes, by the secret key share of the source sent to the destination. 
""" src, dst, mark = ( - int.from_bytes(plaintext[:8], "little", signed=True), - int.from_bytes(plaintext[8:16], "little", signed=True), + int.from_bytes(plaintext[:8], "little", signed=False), + int.from_bytes(plaintext[8:16], "little", signed=False), int.from_bytes(plaintext[16:20], "little"), ) ret = (src, dst, plaintext[20 : 20 + mark], plaintext[20 + mark :]) diff --git a/src/py/flwr/common/serde.py b/src/py/flwr/common/serde.py index 87e01b05d341..54790992b40d 100644 --- a/src/py/flwr/common/serde.py +++ b/src/py/flwr/common/serde.py @@ -38,7 +38,7 @@ from flwr.proto.recordset_pb2 import MetricsRecordValue as ProtoMetricsRecordValue from flwr.proto.recordset_pb2 import ParametersRecord as ProtoParametersRecord from flwr.proto.recordset_pb2 import RecordSet as ProtoRecordSet -from flwr.proto.recordset_pb2 import Sint64List, StringList +from flwr.proto.recordset_pb2 import SintList, StringList, UintList from flwr.proto.run_pb2 import Run as ProtoRun from flwr.proto.task_pb2 import Task, TaskIns, TaskRes from flwr.proto.transport_pb2 import ( @@ -340,6 +340,7 @@ def metrics_from_proto(proto: Any) -> typing.Metrics: # === Scalar messages === +INT64_MAX_VALUE = 9223372036854775807 # (1 << 63) - 1 def scalar_to_proto(scalar: typing.Scalar) -> Scalar: @@ -354,6 +355,9 @@ def scalar_to_proto(scalar: typing.Scalar) -> Scalar: return Scalar(double=scalar) if isinstance(scalar, int): + # Use uint64 for integers larger than the maximum value of sint64 + if scalar > INT64_MAX_VALUE: + return Scalar(uint64=scalar) return Scalar(sint64=scalar) if isinstance(scalar, str): @@ -374,16 +378,16 @@ def scalar_from_proto(scalar_msg: Scalar) -> typing.Scalar: # === Record messages === -_type_to_field = { +_type_to_field: dict[type, str] = { float: "double", int: "sint64", bool: "bool", str: "string", bytes: "bytes", } -_list_type_to_class_and_field = { +_list_type_to_class_and_field: dict[type, tuple[type[GrpcMessage], str]] = { float: (DoubleList, "double_list"), - int: 
(Sint64List, "sint64_list"), + int: (SintList, "sint_list"), bool: (BoolList, "bool_list"), str: (StringList, "string_list"), bytes: (BytesList, "bytes_list"), @@ -391,6 +395,11 @@ def scalar_from_proto(scalar_msg: Scalar) -> typing.Scalar: T = TypeVar("T") +def _is_uint64(value: Any) -> bool: + """Check if a value is uint64.""" + return isinstance(value, int) and value > INT64_MAX_VALUE + + def _record_value_to_proto( value: Any, allowed_types: list[type], proto_class: type[T] ) -> T: @@ -403,12 +412,18 @@ def _record_value_to_proto( # Single element # Note: `isinstance(False, int) == True`. if isinstance(value, t): - arg[_type_to_field[t]] = value + fld = _type_to_field[t] + if t is int and _is_uint64(value): + fld = "uint64" + arg[fld] = value return proto_class(**arg) # List if isinstance(value, list) and all(isinstance(item, t) for item in value): - list_class, field_name = _list_type_to_class_and_field[t] - arg[field_name] = list_class(vals=value) + list_class, fld = _list_type_to_class_and_field[t] + # Use UintList if any element is of type `uint64`. 
+ if t is int and any(_is_uint64(v) for v in value): + list_class, fld = UintList, "uint_list" + arg[fld] = list_class(vals=value) return proto_class(**arg) # Invalid types raise TypeError( diff --git a/src/py/flwr/common/serde_test.py b/src/py/flwr/common/serde_test.py index 49d1e38fa897..19e9889158a0 100644 --- a/src/py/flwr/common/serde_test.py +++ b/src/py/flwr/common/serde_test.py @@ -80,7 +80,7 @@ def test_serialisation_deserialisation() -> None: """Test if the np.ndarray is identical after (de-)serialization.""" # Prepare - scalars = [True, b"bytestr", 3.14, 9000, "Hello"] + scalars = [True, b"bytestr", 3.14, 9000, "Hello", (1 << 63) + 1] for scalar in scalars: # Execute @@ -170,7 +170,7 @@ def get_str(self, length: Optional[int] = None) -> str: length = self.rng.randint(1, 10) return "".join(self.rng.choices(char_pool, k=length)) - def get_value(self, dtype: type[T]) -> T: + def get_value(self, dtype: Union[type[T], str]) -> T: """Create a value of a given type.""" ret: Any = None if dtype == bool: @@ -178,11 +178,13 @@ def get_value(self, dtype: type[T]) -> T: elif dtype == str: ret = self.get_str(self.rng.randint(10, 100)) elif dtype == int: - ret = self.rng.randint(-1 << 30, 1 << 30) + ret = self.rng.randint(-1 << 63, (1 << 63) - 1) elif dtype == float: ret = (self.rng.random() - 0.5) * (2.0 ** self.rng.randint(0, 50)) elif dtype == bytes: ret = self.randbytes(self.rng.randint(10, 100)) + elif dtype == "uint": + ret = self.rng.randint(0, (1 << 64) - 1) else: raise NotImplementedError(f"Unsupported dtype: {dtype}") return cast(T, ret) @@ -316,6 +318,8 @@ def test_metrics_record_serialization_deserialization() -> None: # Prepare maker = RecordMaker() original = maker.metrics_record() + original["uint64"] = (1 << 63) + 321 + original["list of uint64"] = [maker.get_value("uint") for _ in range(30)] # Execute proto = metrics_record_to_proto(original) @@ -331,6 +335,8 @@ def test_configs_record_serialization_deserialization() -> None: # Prepare maker = 
RecordMaker() original = maker.configs_record() + original["uint64"] = (1 << 63) + 101 + original["list of uint64"] = [maker.get_value("uint") for _ in range(100)] # Execute proto = configs_record_to_proto(original) diff --git a/src/py/flwr/proto/fab_pb2.py b/src/py/flwr/proto/fab_pb2.py index 3f04e6693ab8..3a5e50000c10 100644 --- a/src/py/flwr/proto/fab_pb2.py +++ b/src/py/flwr/proto/fab_pb2.py @@ -12,19 +12,20 @@ _sym_db = _symbol_database.Default() +from flwr.proto import node_pb2 as flwr_dot_proto_dot_node__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14\x66lwr/proto/fab.proto\x12\nflwr.proto\"(\n\x03\x46\x61\x62\x12\x10\n\x08hash_str\x18\x01 \x01(\t\x12\x0f\n\x07\x63ontent\x18\x02 \x01(\x0c\"!\n\rGetFabRequest\x12\x10\n\x08hash_str\x18\x01 \x01(\t\".\n\x0eGetFabResponse\x12\x1c\n\x03\x66\x61\x62\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Fabb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14\x66lwr/proto/fab.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\"(\n\x03\x46\x61\x62\x12\x10\n\x08hash_str\x18\x01 \x01(\t\x12\x0f\n\x07\x63ontent\x18\x02 \x01(\x0c\"A\n\rGetFabRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08hash_str\x18\x02 \x01(\t\".\n\x0eGetFabResponse\x12\x1c\n\x03\x66\x61\x62\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Fabb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.fab_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None - _globals['_FAB']._serialized_start=36 - _globals['_FAB']._serialized_end=76 - _globals['_GETFABREQUEST']._serialized_start=78 - _globals['_GETFABREQUEST']._serialized_end=111 - _globals['_GETFABRESPONSE']._serialized_start=113 - _globals['_GETFABRESPONSE']._serialized_end=159 + _globals['_FAB']._serialized_start=59 + _globals['_FAB']._serialized_end=99 + _globals['_GETFABREQUEST']._serialized_start=101 + 
_globals['_GETFABREQUEST']._serialized_end=166 + _globals['_GETFABRESPONSE']._serialized_start=168 + _globals['_GETFABRESPONSE']._serialized_end=214 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/fab_pb2.pyi b/src/py/flwr/proto/fab_pb2.pyi index b2715dde5021..8cfdcbaf76ad 100644 --- a/src/py/flwr/proto/fab_pb2.pyi +++ b/src/py/flwr/proto/fab_pb2.pyi @@ -3,6 +3,7 @@ isort:skip_file """ import builtins +import flwr.proto.node_pb2 import google.protobuf.descriptor import google.protobuf.message import typing @@ -33,13 +34,18 @@ global___Fab = Fab class GetFabRequest(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor + NODE_FIELD_NUMBER: builtins.int HASH_STR_FIELD_NUMBER: builtins.int + @property + def node(self) -> flwr.proto.node_pb2.Node: ... hash_str: typing.Text def __init__(self, *, + node: typing.Optional[flwr.proto.node_pb2.Node] = ..., hash_str: typing.Text = ..., ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["hash_str",b"hash_str"]) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["node",b"node"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["hash_str",b"hash_str","node",b"node"]) -> None: ... 
global___GetFabRequest = GetFabRequest class GetFabResponse(google.protobuf.message.Message): diff --git a/src/py/flwr/proto/fleet_pb2.py b/src/py/flwr/proto/fleet_pb2.py index d1fe719f2d91..3185bc2ce111 100644 --- a/src/py/flwr/proto/fleet_pb2.py +++ b/src/py/flwr/proto/fleet_pb2.py @@ -18,7 +18,7 @@ from flwr.proto import fab_pb2 as flwr_dot_proto_dot_fab__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16\x66lwr/proto/fleet.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\x1a\x14\x66lwr/proto/run.proto\x1a\x14\x66lwr/proto/fab.proto\"*\n\x11\x43reateNodeRequest\x12\x15\n\rping_interval\x18\x01 \x01(\x01\"4\n\x12\x43reateNodeResponse\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"3\n\x11\x44\x65leteNodeRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"\x14\n\x12\x44\x65leteNodeResponse\"D\n\x0bPingRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x15\n\rping_interval\x18\x02 \x01(\x01\"\x1f\n\x0cPingResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\"F\n\x12PullTaskInsRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"k\n\x13PullTaskInsResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12*\n\rtask_ins_list\x18\x02 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"@\n\x12PushTaskResRequest\x12*\n\rtask_res_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskRes\"\xae\x01\n\x13PushTaskResResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12=\n\x07results\x18\x02 \x03(\x0b\x32,.flwr.proto.PushTaskResResponse.ResultsEntry\x1a.\n\x0cResultsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\r:\x02\x38\x01\"\x1e\n\tReconnect\x12\x11\n\treconnect\x18\x01 
\x01(\x04\x32\x8c\x04\n\x05\x46leet\x12M\n\nCreateNode\x12\x1d.flwr.proto.CreateNodeRequest\x1a\x1e.flwr.proto.CreateNodeResponse\"\x00\x12M\n\nDeleteNode\x12\x1d.flwr.proto.DeleteNodeRequest\x1a\x1e.flwr.proto.DeleteNodeResponse\"\x00\x12;\n\x04Ping\x12\x17.flwr.proto.PingRequest\x1a\x18.flwr.proto.PingResponse\"\x00\x12P\n\x0bPullTaskIns\x12\x1e.flwr.proto.PullTaskInsRequest\x1a\x1f.flwr.proto.PullTaskInsResponse\"\x00\x12P\n\x0bPushTaskRes\x12\x1e.flwr.proto.PushTaskResRequest\x1a\x1f.flwr.proto.PushTaskResResponse\"\x00\x12\x41\n\x06GetRun\x12\x19.flwr.proto.GetRunRequest\x1a\x1a.flwr.proto.GetRunResponse\"\x00\x12\x41\n\x06GetFab\x12\x19.flwr.proto.GetFabRequest\x1a\x1a.flwr.proto.GetFabResponse\"\x00\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16\x66lwr/proto/fleet.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\x1a\x14\x66lwr/proto/run.proto\x1a\x14\x66lwr/proto/fab.proto\"*\n\x11\x43reateNodeRequest\x12\x15\n\rping_interval\x18\x01 \x01(\x01\"4\n\x12\x43reateNodeResponse\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"3\n\x11\x44\x65leteNodeRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"\x14\n\x12\x44\x65leteNodeResponse\"D\n\x0bPingRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x15\n\rping_interval\x18\x02 \x01(\x01\"\x1f\n\x0cPingResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\"F\n\x12PullTaskInsRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"k\n\x13PullTaskInsResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12*\n\rtask_ins_list\x18\x02 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"`\n\x12PushTaskResRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12*\n\rtask_res_list\x18\x02 \x03(\x0b\x32\x13.flwr.proto.TaskRes\"\xae\x01\n\x13PushTaskResResponse\x12(\n\treconnect\x18\x01 
\x01(\x0b\x32\x15.flwr.proto.Reconnect\x12=\n\x07results\x18\x02 \x03(\x0b\x32,.flwr.proto.PushTaskResResponse.ResultsEntry\x1a.\n\x0cResultsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\r:\x02\x38\x01\"\x1e\n\tReconnect\x12\x11\n\treconnect\x18\x01 \x01(\x04\x32\x8c\x04\n\x05\x46leet\x12M\n\nCreateNode\x12\x1d.flwr.proto.CreateNodeRequest\x1a\x1e.flwr.proto.CreateNodeResponse\"\x00\x12M\n\nDeleteNode\x12\x1d.flwr.proto.DeleteNodeRequest\x1a\x1e.flwr.proto.DeleteNodeResponse\"\x00\x12;\n\x04Ping\x12\x17.flwr.proto.PingRequest\x1a\x18.flwr.proto.PingResponse\"\x00\x12P\n\x0bPullTaskIns\x12\x1e.flwr.proto.PullTaskInsRequest\x1a\x1f.flwr.proto.PullTaskInsResponse\"\x00\x12P\n\x0bPushTaskRes\x12\x1e.flwr.proto.PushTaskResRequest\x1a\x1f.flwr.proto.PushTaskResResponse\"\x00\x12\x41\n\x06GetRun\x12\x19.flwr.proto.GetRunRequest\x1a\x1a.flwr.proto.GetRunResponse\"\x00\x12\x41\n\x06GetFab\x12\x19.flwr.proto.GetFabRequest\x1a\x1a.flwr.proto.GetFabResponse\"\x00\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -44,13 +44,13 @@ _globals['_PULLTASKINSRESPONSE']._serialized_start=476 _globals['_PULLTASKINSRESPONSE']._serialized_end=583 _globals['_PUSHTASKRESREQUEST']._serialized_start=585 - _globals['_PUSHTASKRESREQUEST']._serialized_end=649 - _globals['_PUSHTASKRESRESPONSE']._serialized_start=652 - _globals['_PUSHTASKRESRESPONSE']._serialized_end=826 - _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_start=780 - _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_end=826 - _globals['_RECONNECT']._serialized_start=828 - _globals['_RECONNECT']._serialized_end=858 - _globals['_FLEET']._serialized_start=861 - _globals['_FLEET']._serialized_end=1385 + _globals['_PUSHTASKRESREQUEST']._serialized_end=681 + _globals['_PUSHTASKRESRESPONSE']._serialized_start=684 + _globals['_PUSHTASKRESRESPONSE']._serialized_end=858 + _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_start=812 + 
_globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_end=858 + _globals['_RECONNECT']._serialized_start=860 + _globals['_RECONNECT']._serialized_end=890 + _globals['_FLEET']._serialized_start=893 + _globals['_FLEET']._serialized_end=1417 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/fleet_pb2.pyi b/src/py/flwr/proto/fleet_pb2.pyi index 5989f45c5c60..76875bc1a4b9 100644 --- a/src/py/flwr/proto/fleet_pb2.pyi +++ b/src/py/flwr/proto/fleet_pb2.pyi @@ -124,14 +124,19 @@ global___PullTaskInsResponse = PullTaskInsResponse class PushTaskResRequest(google.protobuf.message.Message): """PushTaskRes messages""" DESCRIPTOR: google.protobuf.descriptor.Descriptor + NODE_FIELD_NUMBER: builtins.int TASK_RES_LIST_FIELD_NUMBER: builtins.int @property + def node(self) -> flwr.proto.node_pb2.Node: ... + @property def task_res_list(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[flwr.proto.task_pb2.TaskRes]: ... def __init__(self, *, + node: typing.Optional[flwr.proto.node_pb2.Node] = ..., task_res_list: typing.Optional[typing.Iterable[flwr.proto.task_pb2.TaskRes]] = ..., ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["task_res_list",b"task_res_list"]) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["node",b"node"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["node",b"node","task_res_list",b"task_res_list"]) -> None: ... 
global___PushTaskResRequest = PushTaskResRequest class PushTaskResResponse(google.protobuf.message.Message): diff --git a/src/py/flwr/proto/recordset_pb2.py b/src/py/flwr/proto/recordset_pb2.py index f7f74d72182b..6b169f869ab4 100644 --- a/src/py/flwr/proto/recordset_pb2.py +++ b/src/py/flwr/proto/recordset_pb2.py @@ -14,7 +14,7 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lwr/proto/recordset.proto\x12\nflwr.proto\"\x1a\n\nDoubleList\x12\x0c\n\x04vals\x18\x01 \x03(\x01\"\x1a\n\nSint64List\x12\x0c\n\x04vals\x18\x01 \x03(\x12\"\x18\n\x08\x42oolList\x12\x0c\n\x04vals\x18\x01 \x03(\x08\"\x1a\n\nStringList\x12\x0c\n\x04vals\x18\x01 \x03(\t\"\x19\n\tBytesList\x12\x0c\n\x04vals\x18\x01 \x03(\x0c\"B\n\x05\x41rray\x12\r\n\x05\x64type\x18\x01 \x01(\t\x12\r\n\x05shape\x18\x02 \x03(\x05\x12\r\n\x05stype\x18\x03 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\"\x9f\x01\n\x12MetricsRecordValue\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06sint64\x18\x02 \x01(\x12H\x00\x12-\n\x0b\x64ouble_list\x18\x15 \x01(\x0b\x32\x16.flwr.proto.DoubleListH\x00\x12-\n\x0bsint64_list\x18\x16 \x01(\x0b\x32\x16.flwr.proto.Sint64ListH\x00\x42\x07\n\x05value\"\xd9\x02\n\x12\x43onfigsRecordValue\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06sint64\x18\x02 \x01(\x12H\x00\x12\x0e\n\x04\x62ool\x18\x03 \x01(\x08H\x00\x12\x10\n\x06string\x18\x04 \x01(\tH\x00\x12\x0f\n\x05\x62ytes\x18\x05 \x01(\x0cH\x00\x12-\n\x0b\x64ouble_list\x18\x15 \x01(\x0b\x32\x16.flwr.proto.DoubleListH\x00\x12-\n\x0bsint64_list\x18\x16 \x01(\x0b\x32\x16.flwr.proto.Sint64ListH\x00\x12)\n\tbool_list\x18\x17 \x01(\x0b\x32\x14.flwr.proto.BoolListH\x00\x12-\n\x0bstring_list\x18\x18 \x01(\x0b\x32\x16.flwr.proto.StringListH\x00\x12+\n\nbytes_list\x18\x19 \x01(\x0b\x32\x15.flwr.proto.BytesListH\x00\x42\x07\n\x05value\"M\n\x10ParametersRecord\x12\x11\n\tdata_keys\x18\x01 \x03(\t\x12&\n\x0b\x64\x61ta_values\x18\x02 
\x03(\x0b\x32\x11.flwr.proto.Array\"\x8f\x01\n\rMetricsRecord\x12\x31\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32#.flwr.proto.MetricsRecord.DataEntry\x1aK\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.flwr.proto.MetricsRecordValue:\x02\x38\x01\"\x8f\x01\n\rConfigsRecord\x12\x31\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32#.flwr.proto.ConfigsRecord.DataEntry\x1aK\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.flwr.proto.ConfigsRecordValue:\x02\x38\x01\"\x97\x03\n\tRecordSet\x12\x39\n\nparameters\x18\x01 \x03(\x0b\x32%.flwr.proto.RecordSet.ParametersEntry\x12\x33\n\x07metrics\x18\x02 \x03(\x0b\x32\".flwr.proto.RecordSet.MetricsEntry\x12\x33\n\x07\x63onfigs\x18\x03 \x03(\x0b\x32\".flwr.proto.RecordSet.ConfigsEntry\x1aO\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12+\n\x05value\x18\x02 \x01(\x0b\x32\x1c.flwr.proto.ParametersRecord:\x02\x38\x01\x1aI\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.flwr.proto.MetricsRecord:\x02\x38\x01\x1aI\n\x0c\x43onfigsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.flwr.proto.ConfigsRecord:\x02\x38\x01\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lwr/proto/recordset.proto\x12\nflwr.proto\"\x1a\n\nDoubleList\x12\x0c\n\x04vals\x18\x01 \x03(\x01\"\x18\n\x08SintList\x12\x0c\n\x04vals\x18\x01 \x03(\x12\"\x18\n\x08UintList\x12\x0c\n\x04vals\x18\x01 \x03(\x04\"\x18\n\x08\x42oolList\x12\x0c\n\x04vals\x18\x01 \x03(\x08\"\x1a\n\nStringList\x12\x0c\n\x04vals\x18\x01 \x03(\t\"\x19\n\tBytesList\x12\x0c\n\x04vals\x18\x01 \x03(\x0c\"B\n\x05\x41rray\x12\r\n\x05\x64type\x18\x01 \x01(\t\x12\r\n\x05shape\x18\x02 \x03(\x05\x12\r\n\x05stype\x18\x03 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\"\xd8\x01\n\x12MetricsRecordValue\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06sint64\x18\x02 \x01(\x12H\x00\x12\x10\n\x06uint64\x18\x03 
\x01(\x04H\x00\x12-\n\x0b\x64ouble_list\x18\x15 \x01(\x0b\x32\x16.flwr.proto.DoubleListH\x00\x12)\n\tsint_list\x18\x16 \x01(\x0b\x32\x14.flwr.proto.SintListH\x00\x12)\n\tuint_list\x18\x17 \x01(\x0b\x32\x14.flwr.proto.UintListH\x00\x42\x07\n\x05value\"\x92\x03\n\x12\x43onfigsRecordValue\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06sint64\x18\x02 \x01(\x12H\x00\x12\x10\n\x06uint64\x18\x03 \x01(\x04H\x00\x12\x0e\n\x04\x62ool\x18\x04 \x01(\x08H\x00\x12\x10\n\x06string\x18\x05 \x01(\tH\x00\x12\x0f\n\x05\x62ytes\x18\x06 \x01(\x0cH\x00\x12-\n\x0b\x64ouble_list\x18\x15 \x01(\x0b\x32\x16.flwr.proto.DoubleListH\x00\x12)\n\tsint_list\x18\x16 \x01(\x0b\x32\x14.flwr.proto.SintListH\x00\x12)\n\tuint_list\x18\x17 \x01(\x0b\x32\x14.flwr.proto.UintListH\x00\x12)\n\tbool_list\x18\x18 \x01(\x0b\x32\x14.flwr.proto.BoolListH\x00\x12-\n\x0bstring_list\x18\x19 \x01(\x0b\x32\x16.flwr.proto.StringListH\x00\x12+\n\nbytes_list\x18\x1a \x01(\x0b\x32\x15.flwr.proto.BytesListH\x00\x42\x07\n\x05value\"M\n\x10ParametersRecord\x12\x11\n\tdata_keys\x18\x01 \x03(\t\x12&\n\x0b\x64\x61ta_values\x18\x02 \x03(\x0b\x32\x11.flwr.proto.Array\"\x8f\x01\n\rMetricsRecord\x12\x31\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32#.flwr.proto.MetricsRecord.DataEntry\x1aK\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.flwr.proto.MetricsRecordValue:\x02\x38\x01\"\x8f\x01\n\rConfigsRecord\x12\x31\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32#.flwr.proto.ConfigsRecord.DataEntry\x1aK\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.flwr.proto.ConfigsRecordValue:\x02\x38\x01\"\x97\x03\n\tRecordSet\x12\x39\n\nparameters\x18\x01 \x03(\x0b\x32%.flwr.proto.RecordSet.ParametersEntry\x12\x33\n\x07metrics\x18\x02 \x03(\x0b\x32\".flwr.proto.RecordSet.MetricsEntry\x12\x33\n\x07\x63onfigs\x18\x03 \x03(\x0b\x32\".flwr.proto.RecordSet.ConfigsEntry\x1aO\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12+\n\x05value\x18\x02 
\x01(\x0b\x32\x1c.flwr.proto.ParametersRecord:\x02\x38\x01\x1aI\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.flwr.proto.MetricsRecord:\x02\x38\x01\x1aI\n\x0c\x43onfigsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.flwr.proto.ConfigsRecord:\x02\x38\x01\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -33,36 +33,38 @@ _globals['_RECORDSET_CONFIGSENTRY']._serialized_options = b'8\001' _globals['_DOUBLELIST']._serialized_start=42 _globals['_DOUBLELIST']._serialized_end=68 - _globals['_SINT64LIST']._serialized_start=70 - _globals['_SINT64LIST']._serialized_end=96 - _globals['_BOOLLIST']._serialized_start=98 - _globals['_BOOLLIST']._serialized_end=122 - _globals['_STRINGLIST']._serialized_start=124 - _globals['_STRINGLIST']._serialized_end=150 - _globals['_BYTESLIST']._serialized_start=152 - _globals['_BYTESLIST']._serialized_end=177 - _globals['_ARRAY']._serialized_start=179 - _globals['_ARRAY']._serialized_end=245 - _globals['_METRICSRECORDVALUE']._serialized_start=248 - _globals['_METRICSRECORDVALUE']._serialized_end=407 - _globals['_CONFIGSRECORDVALUE']._serialized_start=410 - _globals['_CONFIGSRECORDVALUE']._serialized_end=755 - _globals['_PARAMETERSRECORD']._serialized_start=757 - _globals['_PARAMETERSRECORD']._serialized_end=834 - _globals['_METRICSRECORD']._serialized_start=837 - _globals['_METRICSRECORD']._serialized_end=980 - _globals['_METRICSRECORD_DATAENTRY']._serialized_start=905 - _globals['_METRICSRECORD_DATAENTRY']._serialized_end=980 - _globals['_CONFIGSRECORD']._serialized_start=983 - _globals['_CONFIGSRECORD']._serialized_end=1126 - _globals['_CONFIGSRECORD_DATAENTRY']._serialized_start=1051 - _globals['_CONFIGSRECORD_DATAENTRY']._serialized_end=1126 - _globals['_RECORDSET']._serialized_start=1129 - _globals['_RECORDSET']._serialized_end=1536 - _globals['_RECORDSET_PARAMETERSENTRY']._serialized_start=1307 - 
_globals['_RECORDSET_PARAMETERSENTRY']._serialized_end=1386 - _globals['_RECORDSET_METRICSENTRY']._serialized_start=1388 - _globals['_RECORDSET_METRICSENTRY']._serialized_end=1461 - _globals['_RECORDSET_CONFIGSENTRY']._serialized_start=1463 - _globals['_RECORDSET_CONFIGSENTRY']._serialized_end=1536 + _globals['_SINTLIST']._serialized_start=70 + _globals['_SINTLIST']._serialized_end=94 + _globals['_UINTLIST']._serialized_start=96 + _globals['_UINTLIST']._serialized_end=120 + _globals['_BOOLLIST']._serialized_start=122 + _globals['_BOOLLIST']._serialized_end=146 + _globals['_STRINGLIST']._serialized_start=148 + _globals['_STRINGLIST']._serialized_end=174 + _globals['_BYTESLIST']._serialized_start=176 + _globals['_BYTESLIST']._serialized_end=201 + _globals['_ARRAY']._serialized_start=203 + _globals['_ARRAY']._serialized_end=269 + _globals['_METRICSRECORDVALUE']._serialized_start=272 + _globals['_METRICSRECORDVALUE']._serialized_end=488 + _globals['_CONFIGSRECORDVALUE']._serialized_start=491 + _globals['_CONFIGSRECORDVALUE']._serialized_end=893 + _globals['_PARAMETERSRECORD']._serialized_start=895 + _globals['_PARAMETERSRECORD']._serialized_end=972 + _globals['_METRICSRECORD']._serialized_start=975 + _globals['_METRICSRECORD']._serialized_end=1118 + _globals['_METRICSRECORD_DATAENTRY']._serialized_start=1043 + _globals['_METRICSRECORD_DATAENTRY']._serialized_end=1118 + _globals['_CONFIGSRECORD']._serialized_start=1121 + _globals['_CONFIGSRECORD']._serialized_end=1264 + _globals['_CONFIGSRECORD_DATAENTRY']._serialized_start=1189 + _globals['_CONFIGSRECORD_DATAENTRY']._serialized_end=1264 + _globals['_RECORDSET']._serialized_start=1267 + _globals['_RECORDSET']._serialized_end=1674 + _globals['_RECORDSET_PARAMETERSENTRY']._serialized_start=1445 + _globals['_RECORDSET_PARAMETERSENTRY']._serialized_end=1524 + _globals['_RECORDSET_METRICSENTRY']._serialized_start=1526 + _globals['_RECORDSET_METRICSENTRY']._serialized_end=1599 + 
_globals['_RECORDSET_CONFIGSENTRY']._serialized_start=1601 + _globals['_RECORDSET_CONFIGSENTRY']._serialized_end=1674 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/recordset_pb2.pyi b/src/py/flwr/proto/recordset_pb2.pyi index 86244697129c..91d17e3e6473 100644 --- a/src/py/flwr/proto/recordset_pb2.pyi +++ b/src/py/flwr/proto/recordset_pb2.pyi @@ -23,7 +23,7 @@ class DoubleList(google.protobuf.message.Message): def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... global___DoubleList = DoubleList -class Sint64List(google.protobuf.message.Message): +class SintList(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor VALS_FIELD_NUMBER: builtins.int @property @@ -33,7 +33,19 @@ class Sint64List(google.protobuf.message.Message): vals: typing.Optional[typing.Iterable[builtins.int]] = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... -global___Sint64List = Sint64List +global___SintList = SintList + +class UintList(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + VALS_FIELD_NUMBER: builtins.int + @property + def vals(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: ... + def __init__(self, + *, + vals: typing.Optional[typing.Iterable[builtins.int]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... 
+global___UintList = UintList class BoolList(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor @@ -96,39 +108,48 @@ class MetricsRecordValue(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor DOUBLE_FIELD_NUMBER: builtins.int SINT64_FIELD_NUMBER: builtins.int + UINT64_FIELD_NUMBER: builtins.int DOUBLE_LIST_FIELD_NUMBER: builtins.int - SINT64_LIST_FIELD_NUMBER: builtins.int + SINT_LIST_FIELD_NUMBER: builtins.int + UINT_LIST_FIELD_NUMBER: builtins.int double: builtins.float """Single element""" sint64: builtins.int + uint64: builtins.int @property def double_list(self) -> global___DoubleList: """List types""" pass @property - def sint64_list(self) -> global___Sint64List: ... + def sint_list(self) -> global___SintList: ... + @property + def uint_list(self) -> global___UintList: ... def __init__(self, *, double: builtins.float = ..., sint64: builtins.int = ..., + uint64: builtins.int = ..., double_list: typing.Optional[global___DoubleList] = ..., - sint64_list: typing.Optional[global___Sint64List] = ..., + sint_list: typing.Optional[global___SintList] = ..., + uint_list: typing.Optional[global___UintList] = ..., ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["double",b"double","double_list",b"double_list","sint64",b"sint64","sint64_list",b"sint64_list","value",b"value"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["double",b"double","double_list",b"double_list","sint64",b"sint64","sint64_list",b"sint64_list","value",b"value"]) -> None: ... - def WhichOneof(self, oneof_group: typing_extensions.Literal["value",b"value"]) -> typing.Optional[typing_extensions.Literal["double","sint64","double_list","sint64_list"]]: ... 
+ def HasField(self, field_name: typing_extensions.Literal["double",b"double","double_list",b"double_list","sint64",b"sint64","sint_list",b"sint_list","uint64",b"uint64","uint_list",b"uint_list","value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["double",b"double","double_list",b"double_list","sint64",b"sint64","sint_list",b"sint_list","uint64",b"uint64","uint_list",b"uint_list","value",b"value"]) -> None: ... + def WhichOneof(self, oneof_group: typing_extensions.Literal["value",b"value"]) -> typing.Optional[typing_extensions.Literal["double","sint64","uint64","double_list","sint_list","uint_list"]]: ... global___MetricsRecordValue = MetricsRecordValue class ConfigsRecordValue(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor DOUBLE_FIELD_NUMBER: builtins.int SINT64_FIELD_NUMBER: builtins.int + UINT64_FIELD_NUMBER: builtins.int BOOL_FIELD_NUMBER: builtins.int STRING_FIELD_NUMBER: builtins.int BYTES_FIELD_NUMBER: builtins.int DOUBLE_LIST_FIELD_NUMBER: builtins.int - SINT64_LIST_FIELD_NUMBER: builtins.int + SINT_LIST_FIELD_NUMBER: builtins.int + UINT_LIST_FIELD_NUMBER: builtins.int BOOL_LIST_FIELD_NUMBER: builtins.int STRING_LIST_FIELD_NUMBER: builtins.int BYTES_LIST_FIELD_NUMBER: builtins.int @@ -136,6 +157,7 @@ class ConfigsRecordValue(google.protobuf.message.Message): """Single element""" sint64: builtins.int + uint64: builtins.int bool: builtins.bool string: typing.Text bytes: builtins.bytes @@ -144,7 +166,9 @@ class ConfigsRecordValue(google.protobuf.message.Message): """List types""" pass @property - def sint64_list(self) -> global___Sint64List: ... + def sint_list(self) -> global___SintList: ... + @property + def uint_list(self) -> global___UintList: ... @property def bool_list(self) -> global___BoolList: ... 
@property @@ -155,18 +179,20 @@ class ConfigsRecordValue(google.protobuf.message.Message): *, double: builtins.float = ..., sint64: builtins.int = ..., + uint64: builtins.int = ..., bool: builtins.bool = ..., string: typing.Text = ..., bytes: builtins.bytes = ..., double_list: typing.Optional[global___DoubleList] = ..., - sint64_list: typing.Optional[global___Sint64List] = ..., + sint_list: typing.Optional[global___SintList] = ..., + uint_list: typing.Optional[global___UintList] = ..., bool_list: typing.Optional[global___BoolList] = ..., string_list: typing.Optional[global___StringList] = ..., bytes_list: typing.Optional[global___BytesList] = ..., ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["bool",b"bool","bool_list",b"bool_list","bytes",b"bytes","bytes_list",b"bytes_list","double",b"double","double_list",b"double_list","sint64",b"sint64","sint64_list",b"sint64_list","string",b"string","string_list",b"string_list","value",b"value"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["bool",b"bool","bool_list",b"bool_list","bytes",b"bytes","bytes_list",b"bytes_list","double",b"double","double_list",b"double_list","sint64",b"sint64","sint64_list",b"sint64_list","string",b"string","string_list",b"string_list","value",b"value"]) -> None: ... - def WhichOneof(self, oneof_group: typing_extensions.Literal["value",b"value"]) -> typing.Optional[typing_extensions.Literal["double","sint64","bool","string","bytes","double_list","sint64_list","bool_list","string_list","bytes_list"]]: ... + def HasField(self, field_name: typing_extensions.Literal["bool",b"bool","bool_list",b"bool_list","bytes",b"bytes","bytes_list",b"bytes_list","double",b"double","double_list",b"double_list","sint64",b"sint64","sint_list",b"sint_list","string",b"string","string_list",b"string_list","uint64",b"uint64","uint_list",b"uint_list","value",b"value"]) -> builtins.bool: ... 
+ def ClearField(self, field_name: typing_extensions.Literal["bool",b"bool","bool_list",b"bool_list","bytes",b"bytes","bytes_list",b"bytes_list","double",b"double","double_list",b"double_list","sint64",b"sint64","sint_list",b"sint_list","string",b"string","string_list",b"string_list","uint64",b"uint64","uint_list",b"uint_list","value",b"value"]) -> None: ... + def WhichOneof(self, oneof_group: typing_extensions.Literal["value",b"value"]) -> typing.Optional[typing_extensions.Literal["double","sint64","uint64","bool","string","bytes","double_list","sint_list","uint_list","bool_list","string_list","bytes_list"]]: ... global___ConfigsRecordValue = ConfigsRecordValue class ParametersRecord(google.protobuf.message.Message): diff --git a/src/py/flwr/proto/run_pb2.py b/src/py/flwr/proto/run_pb2.py index d59cc26fbb48..cc3f6897918f 100644 --- a/src/py/flwr/proto/run_pb2.py +++ b/src/py/flwr/proto/run_pb2.py @@ -13,10 +13,11 @@ from flwr.proto import fab_pb2 as flwr_dot_proto_dot_fab__pb2 +from flwr.proto import node_pb2 as flwr_dot_proto_dot_node__pb2 from flwr.proto import transport_pb2 as flwr_dot_proto_dot_transport__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14\x66lwr/proto/run.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/fab.proto\x1a\x1a\x66lwr/proto/transport.proto\"\xd5\x01\n\x03Run\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\x12\x0e\n\x06\x66\x61\x62_id\x18\x02 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x03 \x01(\t\x12<\n\x0foverride_config\x18\x04 \x03(\x0b\x32#.flwr.proto.Run.OverrideConfigEntry\x12\x10\n\x08\x66\x61\x62_hash\x18\x05 \x01(\t\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"@\n\tRunStatus\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x12\n\nsub_status\x18\x02 \x01(\t\x12\x0f\n\x07\x64\x65tails\x18\x03 \x01(\t\"\xeb\x01\n\x10\x43reateRunRequest\x12\x0e\n\x06\x66\x61\x62_id\x18\x01 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x02 
\x01(\t\x12I\n\x0foverride_config\x18\x03 \x03(\x0b\x32\x30.flwr.proto.CreateRunRequest.OverrideConfigEntry\x12\x1c\n\x03\x66\x61\x62\x18\x04 \x01(\x0b\x32\x0f.flwr.proto.Fab\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"#\n\x11\x43reateRunResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\"\x1f\n\rGetRunRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\".\n\x0eGetRunResponse\x12\x1c\n\x03run\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Run\"S\n\x16UpdateRunStatusRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\x12)\n\nrun_status\x18\x02 \x01(\x0b\x32\x15.flwr.proto.RunStatus\"\x19\n\x17UpdateRunStatusResponse\"&\n\x13GetRunStatusRequest\x12\x0f\n\x07run_ids\x18\x01 \x03(\x04\"\xb1\x01\n\x14GetRunStatusResponse\x12L\n\x0frun_status_dict\x18\x01 \x03(\x0b\x32\x33.flwr.proto.GetRunStatusResponse.RunStatusDictEntry\x1aK\n\x12RunStatusDictEntry\x12\x0b\n\x03key\x18\x01 \x01(\x04\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.flwr.proto.RunStatus:\x02\x38\x01\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14\x66lwr/proto/run.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/fab.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/transport.proto\"\xd5\x01\n\x03Run\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\x12\x0e\n\x06\x66\x61\x62_id\x18\x02 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x03 \x01(\t\x12<\n\x0foverride_config\x18\x04 \x03(\x0b\x32#.flwr.proto.Run.OverrideConfigEntry\x12\x10\n\x08\x66\x61\x62_hash\x18\x05 \x01(\t\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"@\n\tRunStatus\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x12\n\nsub_status\x18\x02 \x01(\t\x12\x0f\n\x07\x64\x65tails\x18\x03 \x01(\t\"\xeb\x01\n\x10\x43reateRunRequest\x12\x0e\n\x06\x66\x61\x62_id\x18\x01 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x02 \x01(\t\x12I\n\x0foverride_config\x18\x03 
\x03(\x0b\x32\x30.flwr.proto.CreateRunRequest.OverrideConfigEntry\x12\x1c\n\x03\x66\x61\x62\x18\x04 \x01(\x0b\x32\x0f.flwr.proto.Fab\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"#\n\x11\x43reateRunResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\"?\n\rGetRunRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x0e\n\x06run_id\x18\x02 \x01(\x04\".\n\x0eGetRunResponse\x12\x1c\n\x03run\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Run\"S\n\x16UpdateRunStatusRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\x12)\n\nrun_status\x18\x02 \x01(\x0b\x32\x15.flwr.proto.RunStatus\"\x19\n\x17UpdateRunStatusResponse\"F\n\x13GetRunStatusRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x0f\n\x07run_ids\x18\x02 \x03(\x04\"\xb1\x01\n\x14GetRunStatusResponse\x12L\n\x0frun_status_dict\x18\x01 \x03(\x0b\x32\x33.flwr.proto.GetRunStatusResponse.RunStatusDictEntry\x1aK\n\x12RunStatusDictEntry\x12\x0b\n\x03key\x18\x01 \x01(\x04\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.flwr.proto.RunStatus:\x02\x38\x01\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -29,30 +30,30 @@ _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_options = b'8\001' _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._options = None _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._serialized_options = b'8\001' - _globals['_RUN']._serialized_start=87 - _globals['_RUN']._serialized_end=300 - _globals['_RUN_OVERRIDECONFIGENTRY']._serialized_start=227 - _globals['_RUN_OVERRIDECONFIGENTRY']._serialized_end=300 - _globals['_RUNSTATUS']._serialized_start=302 - _globals['_RUNSTATUS']._serialized_end=366 - _globals['_CREATERUNREQUEST']._serialized_start=369 - _globals['_CREATERUNREQUEST']._serialized_end=604 - _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_start=227 - 
_globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_end=300 - _globals['_CREATERUNRESPONSE']._serialized_start=606 - _globals['_CREATERUNRESPONSE']._serialized_end=641 - _globals['_GETRUNREQUEST']._serialized_start=643 - _globals['_GETRUNREQUEST']._serialized_end=674 - _globals['_GETRUNRESPONSE']._serialized_start=676 - _globals['_GETRUNRESPONSE']._serialized_end=722 - _globals['_UPDATERUNSTATUSREQUEST']._serialized_start=724 - _globals['_UPDATERUNSTATUSREQUEST']._serialized_end=807 - _globals['_UPDATERUNSTATUSRESPONSE']._serialized_start=809 - _globals['_UPDATERUNSTATUSRESPONSE']._serialized_end=834 - _globals['_GETRUNSTATUSREQUEST']._serialized_start=836 - _globals['_GETRUNSTATUSREQUEST']._serialized_end=874 - _globals['_GETRUNSTATUSRESPONSE']._serialized_start=877 - _globals['_GETRUNSTATUSRESPONSE']._serialized_end=1054 - _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._serialized_start=979 - _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._serialized_end=1054 + _globals['_RUN']._serialized_start=110 + _globals['_RUN']._serialized_end=323 + _globals['_RUN_OVERRIDECONFIGENTRY']._serialized_start=250 + _globals['_RUN_OVERRIDECONFIGENTRY']._serialized_end=323 + _globals['_RUNSTATUS']._serialized_start=325 + _globals['_RUNSTATUS']._serialized_end=389 + _globals['_CREATERUNREQUEST']._serialized_start=392 + _globals['_CREATERUNREQUEST']._serialized_end=627 + _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_start=250 + _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_end=323 + _globals['_CREATERUNRESPONSE']._serialized_start=629 + _globals['_CREATERUNRESPONSE']._serialized_end=664 + _globals['_GETRUNREQUEST']._serialized_start=666 + _globals['_GETRUNREQUEST']._serialized_end=729 + _globals['_GETRUNRESPONSE']._serialized_start=731 + _globals['_GETRUNRESPONSE']._serialized_end=777 + _globals['_UPDATERUNSTATUSREQUEST']._serialized_start=779 + _globals['_UPDATERUNSTATUSREQUEST']._serialized_end=862 + 
_globals['_UPDATERUNSTATUSRESPONSE']._serialized_start=864 + _globals['_UPDATERUNSTATUSRESPONSE']._serialized_end=889 + _globals['_GETRUNSTATUSREQUEST']._serialized_start=891 + _globals['_GETRUNSTATUSREQUEST']._serialized_end=961 + _globals['_GETRUNSTATUSRESPONSE']._serialized_start=964 + _globals['_GETRUNSTATUSRESPONSE']._serialized_end=1141 + _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._serialized_start=1066 + _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._serialized_end=1141 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/run_pb2.pyi b/src/py/flwr/proto/run_pb2.pyi index cec90c4d2d4c..16411712eaf2 100644 --- a/src/py/flwr/proto/run_pb2.pyi +++ b/src/py/flwr/proto/run_pb2.pyi @@ -4,6 +4,7 @@ isort:skip_file """ import builtins import flwr.proto.fab_pb2 +import flwr.proto.node_pb2 import flwr.proto.transport_pb2 import google.protobuf.descriptor import google.protobuf.internal.containers @@ -128,13 +129,18 @@ global___CreateRunResponse = CreateRunResponse class GetRunRequest(google.protobuf.message.Message): """GetRun""" DESCRIPTOR: google.protobuf.descriptor.Descriptor + NODE_FIELD_NUMBER: builtins.int RUN_ID_FIELD_NUMBER: builtins.int + @property + def node(self) -> flwr.proto.node_pb2.Node: ... run_id: builtins.int def __init__(self, *, + node: typing.Optional[flwr.proto.node_pb2.Node] = ..., run_id: builtins.int = ..., ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["run_id",b"run_id"]) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["node",b"node"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["node",b"node","run_id",b"run_id"]) -> None: ... 
global___GetRunRequest = GetRunRequest class GetRunResponse(google.protobuf.message.Message): @@ -176,14 +182,19 @@ global___UpdateRunStatusResponse = UpdateRunStatusResponse class GetRunStatusRequest(google.protobuf.message.Message): """GetRunStatus""" DESCRIPTOR: google.protobuf.descriptor.Descriptor + NODE_FIELD_NUMBER: builtins.int RUN_IDS_FIELD_NUMBER: builtins.int @property + def node(self) -> flwr.proto.node_pb2.Node: ... + @property def run_ids(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: ... def __init__(self, *, + node: typing.Optional[flwr.proto.node_pb2.Node] = ..., run_ids: typing.Optional[typing.Iterable[builtins.int]] = ..., ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["run_ids",b"run_ids"]) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["node",b"node"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["node",b"node","run_ids",b"run_ids"]) -> None: ... 
global___GetRunStatusRequest = GetRunStatusRequest class GetRunStatusResponse(google.protobuf.message.Message): diff --git a/src/py/flwr/proto/transport_pb2.py b/src/py/flwr/proto/transport_pb2.py index d3aae72b63ab..b457463f99ca 100644 --- a/src/py/flwr/proto/transport_pb2.py +++ b/src/py/flwr/proto/transport_pb2.py @@ -14,7 +14,7 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lwr/proto/transport.proto\x12\nflwr.proto\"9\n\x06Status\x12\x1e\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x10.flwr.proto.Code\x12\x0f\n\x07message\x18\x02 \x01(\t\"2\n\nParameters\x12\x0f\n\x07tensors\x18\x01 \x03(\x0c\x12\x13\n\x0btensor_type\x18\x02 \x01(\t\"\xba\x08\n\rServerMessage\x12?\n\rreconnect_ins\x18\x01 \x01(\x0b\x32&.flwr.proto.ServerMessage.ReconnectInsH\x00\x12H\n\x12get_properties_ins\x18\x02 \x01(\x0b\x32*.flwr.proto.ServerMessage.GetPropertiesInsH\x00\x12H\n\x12get_parameters_ins\x18\x03 \x01(\x0b\x32*.flwr.proto.ServerMessage.GetParametersInsH\x00\x12\x33\n\x07\x66it_ins\x18\x04 \x01(\x0b\x32 .flwr.proto.ServerMessage.FitInsH\x00\x12=\n\x0c\x65valuate_ins\x18\x05 \x01(\x0b\x32%.flwr.proto.ServerMessage.EvaluateInsH\x00\x1a\x1f\n\x0cReconnectIns\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x1a\x9d\x01\n\x10GetPropertiesIns\x12\x46\n\x06\x63onfig\x18\x01 \x03(\x0b\x32\x36.flwr.proto.ServerMessage.GetPropertiesIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\x9d\x01\n\x10GetParametersIns\x12\x46\n\x06\x63onfig\x18\x01 \x03(\x0b\x32\x36.flwr.proto.ServerMessage.GetParametersIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\xb5\x01\n\x06\x46itIns\x12*\n\nparameters\x18\x01 \x01(\x0b\x32\x16.flwr.proto.Parameters\x12<\n\x06\x63onfig\x18\x02 \x03(\x0b\x32,.flwr.proto.ServerMessage.FitIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 
\x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\xbf\x01\n\x0b\x45valuateIns\x12*\n\nparameters\x18\x01 \x01(\x0b\x32\x16.flwr.proto.Parameters\x12\x41\n\x06\x63onfig\x18\x02 \x03(\x0b\x32\x31.flwr.proto.ServerMessage.EvaluateIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x42\x05\n\x03msg\"\xa0\t\n\rClientMessage\x12\x41\n\x0e\x64isconnect_res\x18\x01 \x01(\x0b\x32\'.flwr.proto.ClientMessage.DisconnectResH\x00\x12H\n\x12get_properties_res\x18\x02 \x01(\x0b\x32*.flwr.proto.ClientMessage.GetPropertiesResH\x00\x12H\n\x12get_parameters_res\x18\x03 \x01(\x0b\x32*.flwr.proto.ClientMessage.GetParametersResH\x00\x12\x33\n\x07\x66it_res\x18\x04 \x01(\x0b\x32 .flwr.proto.ClientMessage.FitResH\x00\x12=\n\x0c\x65valuate_res\x18\x05 \x01(\x0b\x32%.flwr.proto.ClientMessage.EvaluateResH\x00\x1a\x33\n\rDisconnectRes\x12\"\n\x06reason\x18\x01 \x01(\x0e\x32\x12.flwr.proto.Reason\x1a\xcd\x01\n\x10GetPropertiesRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12N\n\nproperties\x18\x02 \x03(\x0b\x32:.flwr.proto.ClientMessage.GetPropertiesRes.PropertiesEntry\x1a\x45\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\x62\n\x10GetParametersRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12*\n\nparameters\x18\x02 \x01(\x0b\x32\x16.flwr.proto.Parameters\x1a\xf2\x01\n\x06\x46itRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12*\n\nparameters\x18\x02 \x01(\x0b\x32\x16.flwr.proto.Parameters\x12\x14\n\x0cnum_examples\x18\x03 \x01(\x03\x12>\n\x07metrics\x18\x04 \x03(\x0b\x32-.flwr.proto.ClientMessage.FitRes.MetricsEntry\x1a\x42\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\xde\x01\n\x0b\x45valuateRes\x12\"\n\x06status\x18\x01 
\x01(\x0b\x32\x12.flwr.proto.Status\x12\x0c\n\x04loss\x18\x02 \x01(\x02\x12\x14\n\x0cnum_examples\x18\x03 \x01(\x03\x12\x43\n\x07metrics\x18\x04 \x03(\x0b\x32\x32.flwr.proto.ClientMessage.EvaluateRes.MetricsEntry\x1a\x42\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x42\x05\n\x03msg\"i\n\x06Scalar\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06sint64\x18\x08 \x01(\x12H\x00\x12\x0e\n\x04\x62ool\x18\r \x01(\x08H\x00\x12\x10\n\x06string\x18\x0e \x01(\tH\x00\x12\x0f\n\x05\x62ytes\x18\x0f \x01(\x0cH\x00\x42\x08\n\x06scalar*\x8d\x01\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\"\n\x1eGET_PROPERTIES_NOT_IMPLEMENTED\x10\x01\x12\"\n\x1eGET_PARAMETERS_NOT_IMPLEMENTED\x10\x02\x12\x17\n\x13\x46IT_NOT_IMPLEMENTED\x10\x03\x12\x1c\n\x18\x45VALUATE_NOT_IMPLEMENTED\x10\x04*[\n\x06Reason\x12\x0b\n\x07UNKNOWN\x10\x00\x12\r\n\tRECONNECT\x10\x01\x12\x16\n\x12POWER_DISCONNECTED\x10\x02\x12\x14\n\x10WIFI_UNAVAILABLE\x10\x03\x12\x07\n\x03\x41\x43K\x10\x04\x32S\n\rFlowerService\x12\x42\n\x04Join\x12\x19.flwr.proto.ClientMessage\x1a\x19.flwr.proto.ServerMessage\"\x00(\x01\x30\x01\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lwr/proto/transport.proto\x12\nflwr.proto\"9\n\x06Status\x12\x1e\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x10.flwr.proto.Code\x12\x0f\n\x07message\x18\x02 \x01(\t\"2\n\nParameters\x12\x0f\n\x07tensors\x18\x01 \x03(\x0c\x12\x13\n\x0btensor_type\x18\x02 \x01(\t\"\xba\x08\n\rServerMessage\x12?\n\rreconnect_ins\x18\x01 \x01(\x0b\x32&.flwr.proto.ServerMessage.ReconnectInsH\x00\x12H\n\x12get_properties_ins\x18\x02 \x01(\x0b\x32*.flwr.proto.ServerMessage.GetPropertiesInsH\x00\x12H\n\x12get_parameters_ins\x18\x03 \x01(\x0b\x32*.flwr.proto.ServerMessage.GetParametersInsH\x00\x12\x33\n\x07\x66it_ins\x18\x04 \x01(\x0b\x32 .flwr.proto.ServerMessage.FitInsH\x00\x12=\n\x0c\x65valuate_ins\x18\x05 
\x01(\x0b\x32%.flwr.proto.ServerMessage.EvaluateInsH\x00\x1a\x1f\n\x0cReconnectIns\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x1a\x9d\x01\n\x10GetPropertiesIns\x12\x46\n\x06\x63onfig\x18\x01 \x03(\x0b\x32\x36.flwr.proto.ServerMessage.GetPropertiesIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\x9d\x01\n\x10GetParametersIns\x12\x46\n\x06\x63onfig\x18\x01 \x03(\x0b\x32\x36.flwr.proto.ServerMessage.GetParametersIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\xb5\x01\n\x06\x46itIns\x12*\n\nparameters\x18\x01 \x01(\x0b\x32\x16.flwr.proto.Parameters\x12<\n\x06\x63onfig\x18\x02 \x03(\x0b\x32,.flwr.proto.ServerMessage.FitIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\xbf\x01\n\x0b\x45valuateIns\x12*\n\nparameters\x18\x01 \x01(\x0b\x32\x16.flwr.proto.Parameters\x12\x41\n\x06\x63onfig\x18\x02 \x03(\x0b\x32\x31.flwr.proto.ServerMessage.EvaluateIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x42\x05\n\x03msg\"\xa0\t\n\rClientMessage\x12\x41\n\x0e\x64isconnect_res\x18\x01 \x01(\x0b\x32\'.flwr.proto.ClientMessage.DisconnectResH\x00\x12H\n\x12get_properties_res\x18\x02 \x01(\x0b\x32*.flwr.proto.ClientMessage.GetPropertiesResH\x00\x12H\n\x12get_parameters_res\x18\x03 \x01(\x0b\x32*.flwr.proto.ClientMessage.GetParametersResH\x00\x12\x33\n\x07\x66it_res\x18\x04 \x01(\x0b\x32 .flwr.proto.ClientMessage.FitResH\x00\x12=\n\x0c\x65valuate_res\x18\x05 \x01(\x0b\x32%.flwr.proto.ClientMessage.EvaluateResH\x00\x1a\x33\n\rDisconnectRes\x12\"\n\x06reason\x18\x01 \x01(\x0e\x32\x12.flwr.proto.Reason\x1a\xcd\x01\n\x10GetPropertiesRes\x12\"\n\x06status\x18\x01 
\x01(\x0b\x32\x12.flwr.proto.Status\x12N\n\nproperties\x18\x02 \x03(\x0b\x32:.flwr.proto.ClientMessage.GetPropertiesRes.PropertiesEntry\x1a\x45\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\x62\n\x10GetParametersRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12*\n\nparameters\x18\x02 \x01(\x0b\x32\x16.flwr.proto.Parameters\x1a\xf2\x01\n\x06\x46itRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12*\n\nparameters\x18\x02 \x01(\x0b\x32\x16.flwr.proto.Parameters\x12\x14\n\x0cnum_examples\x18\x03 \x01(\x03\x12>\n\x07metrics\x18\x04 \x03(\x0b\x32-.flwr.proto.ClientMessage.FitRes.MetricsEntry\x1a\x42\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\xde\x01\n\x0b\x45valuateRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12\x0c\n\x04loss\x18\x02 \x01(\x02\x12\x14\n\x0cnum_examples\x18\x03 \x01(\x03\x12\x43\n\x07metrics\x18\x04 \x03(\x0b\x32\x32.flwr.proto.ClientMessage.EvaluateRes.MetricsEntry\x1a\x42\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x42\x05\n\x03msg\"{\n\x06Scalar\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06uint64\x18\x06 \x01(\x04H\x00\x12\x10\n\x06sint64\x18\x08 \x01(\x12H\x00\x12\x0e\n\x04\x62ool\x18\r \x01(\x08H\x00\x12\x10\n\x06string\x18\x0e \x01(\tH\x00\x12\x0f\n\x05\x62ytes\x18\x0f 
\x01(\x0cH\x00\x42\x08\n\x06scalar*\x8d\x01\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\"\n\x1eGET_PROPERTIES_NOT_IMPLEMENTED\x10\x01\x12\"\n\x1eGET_PARAMETERS_NOT_IMPLEMENTED\x10\x02\x12\x17\n\x13\x46IT_NOT_IMPLEMENTED\x10\x03\x12\x1c\n\x18\x45VALUATE_NOT_IMPLEMENTED\x10\x04*[\n\x06Reason\x12\x0b\n\x07UNKNOWN\x10\x00\x12\r\n\tRECONNECT\x10\x01\x12\x16\n\x12POWER_DISCONNECTED\x10\x02\x12\x14\n\x10WIFI_UNAVAILABLE\x10\x03\x12\x07\n\x03\x41\x43K\x10\x04\x32S\n\rFlowerService\x12\x42\n\x04Join\x12\x19.flwr.proto.ClientMessage\x1a\x19.flwr.proto.ServerMessage\"\x00(\x01\x30\x01\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -35,10 +35,10 @@ _globals['_CLIENTMESSAGE_FITRES_METRICSENTRY']._serialized_options = b'8\001' _globals['_CLIENTMESSAGE_EVALUATERES_METRICSENTRY']._options = None _globals['_CLIENTMESSAGE_EVALUATERES_METRICSENTRY']._serialized_options = b'8\001' - _globals['_CODE']._serialized_start=2533 - _globals['_CODE']._serialized_end=2674 - _globals['_REASON']._serialized_start=2676 - _globals['_REASON']._serialized_end=2767 + _globals['_CODE']._serialized_start=2551 + _globals['_CODE']._serialized_end=2692 + _globals['_REASON']._serialized_start=2694 + _globals['_REASON']._serialized_end=2785 _globals['_STATUS']._serialized_start=42 _globals['_STATUS']._serialized_end=99 _globals['_PARAMETERS']._serialized_start=101 @@ -82,7 +82,7 @@ _globals['_CLIENTMESSAGE_EVALUATERES_METRICSENTRY']._serialized_start=2125 _globals['_CLIENTMESSAGE_EVALUATERES_METRICSENTRY']._serialized_end=2191 _globals['_SCALAR']._serialized_start=2425 - _globals['_SCALAR']._serialized_end=2530 - _globals['_FLOWERSERVICE']._serialized_start=2769 - _globals['_FLOWERSERVICE']._serialized_end=2852 + _globals['_SCALAR']._serialized_end=2548 + _globals['_FLOWERSERVICE']._serialized_start=2787 + _globals['_FLOWERSERVICE']._serialized_end=2870 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/transport_pb2.pyi 
b/src/py/flwr/proto/transport_pb2.pyi index d10a1536ceab..0fe541f0a320 100644 --- a/src/py/flwr/proto/transport_pb2.pyi +++ b/src/py/flwr/proto/transport_pb2.pyi @@ -402,20 +402,22 @@ global___ClientMessage = ClientMessage class Scalar(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor DOUBLE_FIELD_NUMBER: builtins.int + UINT64_FIELD_NUMBER: builtins.int SINT64_FIELD_NUMBER: builtins.int BOOL_FIELD_NUMBER: builtins.int STRING_FIELD_NUMBER: builtins.int BYTES_FIELD_NUMBER: builtins.int double: builtins.float - sint64: builtins.int + uint64: builtins.int """float float = 2; int32 int32 = 3; int64 int64 = 4; uint32 uint32 = 5; - uint64 uint64 = 6; - sint32 sint32 = 7; """ + sint64: builtins.int + """sint32 sint32 = 7;""" + bool: builtins.bool """fixed32 fixed32 = 9; fixed64 fixed64 = 10; @@ -428,12 +430,13 @@ class Scalar(google.protobuf.message.Message): def __init__(self, *, double: builtins.float = ..., + uint64: builtins.int = ..., sint64: builtins.int = ..., bool: builtins.bool = ..., string: typing.Text = ..., bytes: builtins.bytes = ..., ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["bool",b"bool","bytes",b"bytes","double",b"double","scalar",b"scalar","sint64",b"sint64","string",b"string"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["bool",b"bool","bytes",b"bytes","double",b"double","scalar",b"scalar","sint64",b"sint64","string",b"string"]) -> None: ... - def WhichOneof(self, oneof_group: typing_extensions.Literal["scalar",b"scalar"]) -> typing.Optional[typing_extensions.Literal["double","sint64","bool","string","bytes"]]: ... + def HasField(self, field_name: typing_extensions.Literal["bool",b"bool","bytes",b"bytes","double",b"double","scalar",b"scalar","sint64",b"sint64","string",b"string","uint64",b"uint64"]) -> builtins.bool: ... 
+ def ClearField(self, field_name: typing_extensions.Literal["bool",b"bool","bytes",b"bytes","double",b"double","scalar",b"scalar","sint64",b"sint64","string",b"string","uint64",b"uint64"]) -> None: ... + def WhichOneof(self, oneof_group: typing_extensions.Literal["scalar",b"scalar"]) -> typing.Optional[typing_extensions.Literal["double","uint64","sint64","bool","string","bytes"]]: ... global___Scalar = Scalar diff --git a/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py index d836a74bef2e..855fab353ae6 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py +++ b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py @@ -30,6 +30,7 @@ generate_shared_key, verify_hmac, ) +from flwr.proto.fab_pb2 import GetFabRequest, GetFabResponse # pylint: disable=E0611 from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, CreateNodeResponse, @@ -56,6 +57,7 @@ PushTaskResRequest, GetRunRequest, PingRequest, + GetFabRequest, ] Response = Union[ @@ -65,6 +67,7 @@ PushTaskResResponse, GetRunResponse, PingResponse, + GetFabResponse, ] @@ -173,6 +176,7 @@ def _verify_node_id( PushTaskResRequest, GetRunRequest, PingRequest, + GetFabRequest, ], ) -> bool: if node_id is None: diff --git a/src/py/flwr/server/superlink/state/in_memory_state.py b/src/py/flwr/server/superlink/state/in_memory_state.py index e34d15374350..e09df8dc76f6 100644 --- a/src/py/flwr/server/superlink/state/in_memory_state.py +++ b/src/py/flwr/server/superlink/state/in_memory_state.py @@ -117,6 +117,23 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: log(ERROR, errors) return None + with self.lock: + # Check if the TaskIns it is replying to exists and is valid + task_ins_id = task_res.task.ancestry[0] + task_ins = self.task_ins_store.get(UUID(task_ins_id)) + + if task_ins is None: + log(ERROR, "TaskIns with task_id %s does not exist.", task_ins_id) + return 
None + + if task_ins.task.created_at + task_ins.task.ttl <= time.time(): + log( + ERROR, + "Failed to store TaskRes: TaskIns with task_id %s has expired.", + task_ins_id, + ) + return None + # Validate run_id if task_res.run_id not in self.run_ids: log(ERROR, "`run_id` is invalid") diff --git a/src/py/flwr/server/superlink/state/sqlite_state.py b/src/py/flwr/server/superlink/state/sqlite_state.py index 28d957a90bd3..d18683286196 100644 --- a/src/py/flwr/server/superlink/state/sqlite_state.py +++ b/src/py/flwr/server/superlink/state/sqlite_state.py @@ -372,7 +372,18 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: # Create task_id task_id = uuid4() - # Store TaskIns + task_ins_id = task_res.task.ancestry[0] + task_ins = self.get_valid_task_ins(task_ins_id) + if task_ins is None: + log( + ERROR, + "Failed to store TaskRes: " + "TaskIns with task_id %s does not exist or has expired.", + task_ins_id, + ) + return None + + # Store TaskRes task_res.task_id = str(task_id) data = (task_res_to_dict(task_res),) @@ -810,6 +821,33 @@ def acknowledge_ping(self, node_id: int, ping_interval: float) -> bool: log(ERROR, "`node_id` does not exist.") return False + def get_valid_task_ins(self, task_id: str) -> Optional[dict[str, Any]]: + """Check if the TaskIns exists and is valid (not expired). + + Return TaskIns if valid. 
+ """ + query = """ + SELECT * + FROM task_ins + WHERE task_id = :task_id + """ + data = {"task_id": task_id} + rows = self.query(query, data) + if not rows: + # TaskIns does not exist + return None + + task_ins = rows[0] + created_at = task_ins["created_at"] + ttl = task_ins["ttl"] + current_time = time.time() + + # Check if TaskIns is expired + if ttl is not None and created_at + ttl <= current_time: + return None + + return task_ins + def dict_factory( cursor: sqlite3.Cursor, diff --git a/src/py/flwr/server/superlink/state/state_test.py b/src/py/flwr/server/superlink/state/state_test.py index 42c0768f1c7d..85cda1a5af9c 100644 --- a/src/py/flwr/server/superlink/state/state_test.py +++ b/src/py/flwr/server/superlink/state/state_test.py @@ -21,7 +21,6 @@ from abc import abstractmethod from datetime import datetime, timezone from unittest.mock import patch -from uuid import uuid4 from flwr.common import DEFAULT_TTL from flwr.common.constant import ErrorCode @@ -302,7 +301,10 @@ def test_task_res_store_and_retrieve_by_task_ins_id(self) -> None: # Prepare state: State = self.state_factory() run_id = state.create_run(None, None, "9f86d08", {}) - task_ins_id = uuid4() + + task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) + task_ins_id = state.store_task_ins(task_ins) + task_res = create_task_res( producer_node_id=0, anonymous=True, @@ -312,7 +314,9 @@ def test_task_res_store_and_retrieve_by_task_ins_id(self) -> None: # Execute task_res_uuid = state.store_task_res(task_res) - task_res_list = state.get_task_res(task_ids={task_ins_id}, limit=None) + + if task_ins_id is not None: + task_res_list = state.get_task_res(task_ids={task_ins_id}, limit=None) # Assert retrieved_task_res = task_res_list[0] @@ -507,11 +511,23 @@ def test_num_task_res(self) -> None: # Prepare state: State = self.state_factory() run_id = state.create_run(None, None, "9f86d08", {}) + + task_ins_0 = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) + 
task_ins_1 = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) + task_ins_id_0 = state.store_task_ins(task_ins_0) + task_ins_id_1 = state.store_task_ins(task_ins_1) + task_0 = create_task_res( - producer_node_id=0, anonymous=True, ancestry=["1"], run_id=run_id + producer_node_id=0, + anonymous=True, + ancestry=[str(task_ins_id_0)], + run_id=run_id, ) task_1 = create_task_res( - producer_node_id=0, anonymous=True, ancestry=["1"], run_id=run_id + producer_node_id=0, + anonymous=True, + ancestry=[str(task_ins_id_1)], + run_id=run_id, ) # Store two tasks @@ -664,6 +680,33 @@ def test_node_unavailable_error(self) -> None: assert err_taskres.task.HasField("error") assert err_taskres.task.error.code == ErrorCode.NODE_UNAVAILABLE + def test_store_task_res_task_ins_expired(self) -> None: + """Test behavior of store_task_res when the TaskIns it references is expired.""" + # Prepare + state: State = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}) + + task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) + task_ins.task.created_at = time.time() - task_ins.task.ttl + 0.5 + task_ins_id = state.store_task_ins(task_ins) + + with patch( + "time.time", + side_effect=lambda: task_ins.task.created_at + task_ins.task.ttl + 0.1, + ): # Expired by 0.1 seconds + task = create_task_res( + producer_node_id=0, + anonymous=True, + ancestry=[str(task_ins_id)], + run_id=run_id, + ) + + # Execute + result = state.store_task_res(task) + + # Assert + assert result is None + def create_task_ins( consumer_node_id: int, diff --git a/src/py/flwr/server/utils/validator.py b/src/py/flwr/server/utils/validator.py index fb3d0425db86..01f926c4985d 100644 --- a/src/py/flwr/server/utils/validator.py +++ b/src/py/flwr/server/utils/validator.py @@ -15,6 +15,7 @@ """Validators.""" +import time from typing import Union from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 @@ -47,6 +48,11 @@ def 
validate_task_ins_or_res(tasks_ins_res: Union[TaskIns, TaskRes]) -> list[str # unix timestamp of 27 March 2024 00h:00m:00s UTC validation_errors.append("`pushed_at` is not a recent timestamp") + # Verify TTL and created_at time + current_time = time.time() + if tasks_ins_res.task.created_at + tasks_ins_res.task.ttl <= current_time: + validation_errors.append("Task TTL has expired") + # TaskIns specific if isinstance(tasks_ins_res, TaskIns): # Task producer diff --git a/src/py/flwr/server/utils/validator_test.py b/src/py/flwr/server/utils/validator_test.py index 20162883efea..ce8e3636467c 100644 --- a/src/py/flwr/server/utils/validator_test.py +++ b/src/py/flwr/server/utils/validator_test.py @@ -76,6 +76,24 @@ def test_is_valid_task_res(self) -> None: val_errors = validate_task_ins_or_res(msg) self.assertTrue(val_errors, (producer_node_id, anonymous, ancestry)) + def test_task_ttl_expired(self) -> None: + """Test validation for expired Task TTL.""" + # Prepare an expired TaskIns + expired_task_ins = create_task_ins(0, True) + expired_task_ins.task.created_at = time.time() - 10 # 10 seconds ago + expired_task_ins.task.ttl = 6 # 6 seconds TTL + + expired_task_res = create_task_res(0, True, ["1"]) + expired_task_res.task.created_at = time.time() - 10 # 10 seconds ago + expired_task_res.task.ttl = 6 # 6 seconds TTL + + # Execute & Assert + val_errors_ins = validate_task_ins_or_res(expired_task_ins) + self.assertIn("Task TTL has expired", val_errors_ins) + + val_errors_res = validate_task_ins_or_res(expired_task_res) + self.assertIn("Task TTL has expired", val_errors_res) + def create_task_ins( consumer_node_id: int,